[monkeydoc] Merge/add monkeydoc to master.
author    Jeremie Laval <jeremie.laval@gmail.com>
Fri, 23 Nov 2012 14:56:15 +0000 (14:56 +0000)
committer Jeremie Laval <jeremie.laval@gmail.com>
Fri, 23 Nov 2012 14:56:15 +0000 (14:56 +0000)
The commit also contains tiny changes to existing XML documentation, restoring docs that were lost, and a couple of fixes to mdoc/monodoc to cope with the new system.

584 files changed:
docs/sources/mono-api-decimal.html [new file with mode: 0644]
mcs/build/library.make
mcs/class/Mono.Posix/Documentation/en/Mono.Posix/Syscall.xml
mcs/class/Mono.Security.Win32/Documentation/en/Mono.Security.Cryptography/MD2.xml [deleted file]
mcs/class/Mono.Security.Win32/Documentation/en/Mono.Security.Cryptography/MD4.xml [deleted file]
mcs/class/System/Documentation/en/System.Threading/SemaphoreFullException.xml [deleted file]
mcs/class/corlib/Documentation/en/System/Action.xml [deleted file]
mcs/class/corlib/Documentation/en/System/Action`1.xml [deleted file]
mcs/class/corlib/Documentation/en/System/Action`2.xml [deleted file]
mcs/class/corlib/Documentation/en/System/Action`3.xml [deleted file]
mcs/class/corlib/Documentation/en/System/Action`4.xml [deleted file]
mcs/class/corlib/Documentation/en/System/Func`1.xml [deleted file]
mcs/class/corlib/Documentation/en/System/Func`2.xml [deleted file]
mcs/class/corlib/Documentation/en/System/Func`3.xml [deleted file]
mcs/class/corlib/Documentation/en/System/Func`4.xml [deleted file]
mcs/class/corlib/Documentation/en/System/Func`5.xml [deleted file]
mcs/class/corlib/Documentation/en/System/InvalidTimeZoneException.xml [deleted file]
mcs/class/corlib/Documentation/en/System/TimeZoneInfo+AdjustmentRule.xml [deleted file]
mcs/class/corlib/Documentation/en/System/TimeZoneInfo+TransitionTime.xml [deleted file]
mcs/class/corlib/Documentation/en/System/TimeZoneInfo.xml [deleted file]
mcs/class/corlib/Documentation/en/System/TimeZoneNotFoundException.xml [deleted file]
mcs/tools/mdoc/Mono.Documentation/monodocer.cs
mcs/tools/monkeydoc/Assembly/AssemblyInfo.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/.gitattributes [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/ABOUT.txt [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/BUILD.txt [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/CHANGES.txt [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/HISTORY.txt [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net.dll.sources [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/.gitattributes [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/.gitattributes [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/ASCIIFoldingFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Analyzer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/BaseCharFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CachingTokenFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharArraySet.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharStream.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharTokenizer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharacterCache.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/ISOLatin1AccentFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/KeywordAnalyzer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/KeywordTokenizer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/LengthFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/LetterTokenizer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/LowerCaseFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/LowerCaseTokenizer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/MappingCharFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/NormalizeCharMap.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/NumericTokenStream.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Package.html [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/PerFieldAnalyzerWrapper.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/PorterStemFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/PorterStemmer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/SimpleAnalyzer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/SinkTokenizer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/.gitattributes [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/Package.html [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/StandardFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/StandardTokenizer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/StandardTokenizerImpl.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/StandardTokenizerImpl.jflex [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/StopAnalyzer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/StopFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/TeeSinkTokenFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/TeeTokenFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Token.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/TokenFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/TokenStream.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/TokenWrapper.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/FlagsAttribute.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/FlagsAttributeImpl.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/OffsetAttribute.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/OffsetAttributeImpl.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PayloadAttribute.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PayloadAttributeImpl.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TermAttribute.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TermAttributeImpl.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TypeAttribute.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TypeAttributeImpl.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenizer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/WhitespaceAnalyzer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/WhitespaceTokenizer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/WordlistLoader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/.gitattributes [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/AbstractField.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/CompressionTools.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/DateField.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/DateTools.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/Document.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/Field.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/FieldSelector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/FieldSelectorResult.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/Fieldable.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/LoadFirstFieldSelector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/MapFieldSelector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/NumberTools.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/NumericField.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/Package.html [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/SetBasedFieldSelector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/.gitattributes [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/AbstractAllTermDocs.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/AllTermDocs.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/BufferedDeletes.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ByteBlockPool.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ByteSliceReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ByteSliceWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/CharBlockPool.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/CheckIndex.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/CompoundFileReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/CompoundFileWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ConcurrentMergeScheduler.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/CorruptIndexException.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DefaultSkipListReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DefaultSkipListWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DirectoryOwningReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DirectoryReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocConsumer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocConsumerPerThread.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumerPerField.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumerPerThread.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumers.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumersPerField.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumersPerThread.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldProcessor.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldProcessorPerField.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldProcessorPerThread.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocInverter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocInverterPerField.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocInverterPerThread.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocumentsWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocumentsWriterThreadState.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldInfo.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldInfos.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldInvertState.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldReaderException.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldSortedTermVectorMapper.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldsReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldsWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FilterIndexReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsDocsConsumer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsDocsWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsFieldsConsumer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsFieldsWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsPositionsConsumer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsPositionsWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsTermsConsumer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsTermsWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FreqProxFieldMergeState.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FreqProxTermsWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FreqProxTermsWriterPerField.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FreqProxTermsWriterPerThread.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexCommit.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexCommitPoint.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexDeletionPolicy.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexFileDeleter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexFileNameFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexFileNames.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexModifier.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IntBlockPool.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocConsumer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocConsumerPerField.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocConsumerPerThread.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocEndConsumer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocEndConsumerPerField.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocEndConsumerPerThread.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/KeepOnlyLastCommitDeletionPolicy.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/LogByteSizeMergePolicy.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/LogDocMergePolicy.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/LogMergePolicy.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MergeDocIDRemapper.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MergePolicy.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MergeScheduler.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MultiLevelSkipListReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MultiLevelSkipListWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MultiReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MultipleTermPositions.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/NormsWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/NormsWriterPerField.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/NormsWriterPerThread.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/Package.html [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ParallelReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/Payload.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/PositionBasedTermVectorMapper.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/RawPostingList.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ReadOnlyDirectoryReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ReadOnlySegmentReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ReusableStringReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentInfo.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentInfos.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentMergeInfo.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentMergeQueue.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentMerger.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentTermDocs.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentTermEnum.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentTermPositionVector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentTermPositions.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentTermVector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentWriteState.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SerialMergeScheduler.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SnapshotDeletionPolicy.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SortedTermVectorMapper.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/StaleReaderException.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/StoredFieldsWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/StoredFieldsWriterPerThread.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/Term.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermBuffer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermDocs.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermEnum.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermFreqVector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermInfo.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermInfosReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermInfosWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermPositionVector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermPositions.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorEntry.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorEntryFreqSortedComparator.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorMapper.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorOffsetInfo.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorsReader.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorsTermsWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorsTermsWriterPerField.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorsTermsWriterPerThread.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorsWriter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHash.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHashConsumer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHashConsumerPerField.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHashConsumerPerThread.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHashPerField.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHashPerThread.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/LZOCompressor.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.Search.RemoteSearchable.config [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.Search.TestSort.config [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.csproj [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.ndoc [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.snk [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/LucenePackage.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/.gitattributes [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/Message.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/MessageImpl.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/NLS.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/NLSException.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/Package.html [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Overview.html [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Package.html [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/.gitattributes [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/CharStream.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/FastCharStream.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/MultiFieldQueryParser.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/Package.html [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/ParseException.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/QueryParser.JJ [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/QueryParser.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/QueryParserConstants.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/QueryParserTokenManager.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/Token.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/TokenMgrError.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/.gitattributes [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/BooleanClause.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/BooleanQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/BooleanScorer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/BooleanScorer2.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/CachingSpanFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/CachingWrapperFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Collector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ComplexExplanation.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ConjunctionScorer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ConstantScoreQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ConstantScoreRangeQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DefaultSimilarity.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DisjunctionMaxQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DisjunctionMaxScorer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DisjunctionSumScorer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DocIdSet.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DocIdSetIterator.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ExactPhraseScorer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Explanation.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ExtendedFieldCache.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldCache.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldCacheImpl.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldCacheRangeFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldCacheTermsFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldComparator.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldComparatorSource.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldDoc.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldDocSortedHitQueue.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldSortedHitQueue.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldValueHitQueue.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Filter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FilterManager.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FilteredDocIdSet.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FilteredDocIdSetIterator.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FilteredQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FilteredTermEnum.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/ByteFieldSource.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/CustomScoreProvider.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/CustomScoreQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/DocValues.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/FieldCacheSource.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/FieldScoreQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/FloatFieldSource.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/IntFieldSource.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/MultiValueSource.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/OrdFieldSource.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/Package.html [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/ReverseOrdFieldSource.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/ShortFieldSource.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/ValueSource.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/ValueSourceQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FuzzyQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FuzzyTermEnum.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Hit.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/HitCollector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/HitCollectorWrapper.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/HitIterator.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/HitQueue.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Hits.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/IndexSearcher.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/MatchAllDocsQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/MultiPhraseQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/MultiSearcher.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/MultiTermQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/MultiTermQueryWrapperFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/NumericRangeFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/NumericRangeQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Package.html [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ParallelMultiSearcher.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/AveragePayloadFunction.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/BoostingTermQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/MaxPayloadFunction.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/MinPayloadFunction.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/Package.html [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/PayloadFunction.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/PayloadNearQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/PayloadSpanUtil.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/PayloadTermQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PhrasePositions.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PhraseQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PhraseQueue.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PhraseScorer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PositiveScoresOnlyCollector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PrefixFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PrefixQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PrefixTermEnum.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Query.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/QueryFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/QueryTermVector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/QueryWrapperFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/RangeFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/RangeQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ReqExclScorer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ReqOptSumScorer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ScoreCachingWrappingScorer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ScoreDoc.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ScoreDocComparator.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Scorer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Searchable.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Searcher.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Similarity.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SimilarityDelegator.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SloppyPhraseScorer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Sort.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SortComparator.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SortComparatorSource.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SortField.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SpanFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SpanFilterResult.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SpanQueryFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/FieldMaskingSpanQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/NearSpansOrdered.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/NearSpansUnordered.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/Package.html [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanFirstQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanNearQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanNotQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanOrQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanScorer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanTermQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanWeight.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/Spans.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/TermSpans.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TermQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TermRangeFilter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TermRangeQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TermRangeTermEnum.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TermScorer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TimeLimitedCollector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TimeLimitingCollector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopDocCollector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopDocs.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopDocsCollector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopFieldCollector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopFieldDocCollector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopFieldDocs.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopScoreDocCollector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Weight.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/WildcardQuery.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/WildcardTermEnum.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/.gitattributes [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/AlreadyClosedException.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/BufferedIndexInput.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/BufferedIndexOutput.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/CheckSumIndexInput.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/CheckSumIndexOutput.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/Directory.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/FSDirectory.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/FSLockFactory.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/FileSwitchDirectory.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/IndexInput.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/IndexOutput.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/Lock.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/LockFactory.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/LockObtainFailedException.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/LockReleaseFailedException.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/LockStressTest.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/LockVerifyServer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/MMapDirectory.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/NIOFSDirectory.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/NativeFSLockFactory.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/NoLockFactory.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/NoSuchDirectoryException.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/Package.html [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/RAMDirectory.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/RAMFile.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/RAMInputStream.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/RAMOutputStream.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/SimpleFSDirectory.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/SimpleFSLockFactory.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/SingleInstanceLockFactory.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/VerifyingLockFactory.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/SupportClass.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/.gitattributes [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/ArrayUtil.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Attribute.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/AttributeImpl.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/AttributeSource.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/AverageGuessMemoryModel.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/BitUtil.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/BitVector.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Cache/Cache.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Cache/SimpleLRUCache.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Cache/SimpleMapCache.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/CloseableThreadLocal.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Constants.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/DocIdBitSet.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/FieldCacheSanityChecker.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/IndexableBinaryStringTools.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/MapOfSets.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/MemoryModel.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/NumericUtils.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/OpenBitSet.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/OpenBitSetDISI.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/OpenBitSetIterator.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Package.html [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Parameter.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/PriorityQueue.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/RamUsageEstimator.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/ReaderUtil.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/ScorerDocQueue.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/SimpleStringInterner.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/SmallFloat.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/SortedVIntList.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/SorterTemplate.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/StringHelper.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/StringInterner.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/ToStringUtils.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/UnicodeUtil.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Version.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/lucene.net.project.nuspec [new file with mode: 0644]
mcs/tools/monkeydoc/Makefile [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc.Ecma/EcmaDesc.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc.Ecma/EcmaUrlParser.jay [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc.Ecma/EcmaUrlParserDriver.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc.Ecma/EcmaUrlTokenizer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/HelpSource.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/Provider.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/RootTree.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/SearchableDocument.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/SearchableIndex.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/Tree.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/TypeUtils.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/cache.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/caches/FileCache.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/caches/NullCache.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/generator.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/generators/HtmlGenerator.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/generators/html/Ecma2Html.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/generators/html/Ecmaspec2Html.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/generators/html/Error2Html.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/generators/html/Idem.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/generators/html/Man2Html.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/generators/html/MonoBook2Html.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/generators/html/Toc2Html.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/index.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/providers/addins-provider.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/providers/ecma-provider.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/providers/ecmaspec-provider.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/providers/error-provider.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/providers/man-provider.cs [new file with mode: 0755]
mcs/tools/monkeydoc/Monkeydoc/providers/simple-provider.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/providers/xhtml-provider.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/storage.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Monkeydoc/storage/ZipStorage.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Mono.Documentation/ManifestResourceResolver.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Mono.Documentation/XmlDocUtils.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Mono.Utilities/LRUCache.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Mono.Utilities/MemoryLRU.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Mono.Utilities/colorizer.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/.gitattributes [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/Lminus.gif [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/Lplus.gif [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/base.css [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/ecmaspec-html-css.xsl [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/ecmaspec-html.xsl [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/ecmaspec.css [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/helper.js [new file with mode: 0755]
mcs/tools/monkeydoc/Resources/home.html [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/bc_bg.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/bc_separator.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/error.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/hatch.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/headerbg.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/help.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/house.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/members.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/namespace.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/privclass.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/privdelegate.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/privenumeration.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/privevent.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/privextension.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/privfield.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/privinterface.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/privmethod.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/privproperty.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/privstructure.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/protclass.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/protdelegate.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/protenumeration.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/protevent.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/protextension.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/protfield.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/protinterface.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/protmethod.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/protproperty.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/protstructure.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/pubclass.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/pubdelegate.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/pubenumeration.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/pubevent.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/pubextension.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/pubfield.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/pubinterface.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/pubmethod.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/pubproperty.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/pubstructure.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/reference.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/images/treebg.png [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/mdoc-html-format.xsl [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/mdoc-html-utils.xsl [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/mdoc-sections-css.xsl [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/mdoc-sections.xsl [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/mono-ecma-css.xsl [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/mono-ecma-impl.xsl [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/mono-ecma.css [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/mono-ecma.xsl [new file with mode: 0644]
mcs/tools/monkeydoc/Resources/toc-html.xsl [new file with mode: 0644]
mcs/tools/monkeydoc/Test/Monkeydoc.Ecma/EcmaUrlTests.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Test/Monkeydoc/HelpSourceTests.cs [new file with mode: 0644]
mcs/tools/monkeydoc/Test/monodoc/monodoc.xml [new file with mode: 0644]
mcs/tools/monkeydoc/monkeydoc.dll.config.in [new file with mode: 0644]
mcs/tools/monkeydoc/monkeydoc.dll.sources [new file with mode: 0644]
mcs/tools/monkeydoc/monkeydoc_test.dll.sources [new file with mode: 0644]
mcs/tools/monodoc/Monodoc/ecma-provider.cs
mcs/tools/monodoc/Monodoc/provider.cs

diff --git a/docs/sources/mono-api-decimal.html b/docs/sources/mono-api-decimal.html
new file mode 100644 (file)
index 0000000..41c9347
--- /dev/null
+++ b/docs/sources/mono-api-decimal.html
@@ -0,0 +1,19 @@
+<h1>Decimal Support</h1>
+
+       <p>You can use the mono_decimal functions to access and
+       manipulate <tt>System.Decimal</tt> types from C.
+       
+<h4><a name="api:mono_decimal2double">mono_decimal2double</a></h4>
+<h4><a name="api:mono_decimal2Int64">mono_decimal2Int64</a></h4>
+<h4><a name="api:mono_decimal2string">mono_decimal2string</a></h4>
+<h4><a name="api:mono_decimal2UInt64">mono_decimal2UInt64</a></h4>
+<h4><a name="api:mono_decimalCompare">mono_decimalCompare</a></h4>
+<h4><a name="api:mono_decimalDiv">mono_decimalDiv</a></h4>
+<h4><a name="api:mono_decimalFloorAndTrunc">mono_decimalFloorAndTrunc</a></h4>
+<h4><a name="api:mono_decimalIncr">mono_decimalIncr</a></h4>
+<h4><a name="api:mono_decimalIntDiv">mono_decimalIntDiv</a></h4>
+<h4><a name="api:mono_decimalMult">mono_decimalMult</a></h4>
+<h4><a name="api:mono_decimalRound">mono_decimalRound</a></h4>
+<h4><a name="api:mono_decimalSetExponent">mono_decimalSetExponent</a></h4>
+<h4><a name="api:mono_double2decimal">mono_double2decimal</a></h4>
+<h4><a name="api:mono_string2decimal">mono_string2decimal</a></h4>
diff --git a/mcs/build/library.make b/mcs/build/library.make
index ad8d8e04604bde9c718863270f1e4a8d798c279c..768149e9d1066b1ba99eab7e122d86dfef6181d7 100644 (file)
--- a/mcs/build/library.make
+++ b/mcs/build/library.make
@@ -310,7 +310,7 @@ $(makefrag) $(test_response) $(test_makefrag) $(btest_response) $(btest_makefrag
 Q_MDOC_UP=$(if $(V),,@echo "MDOC-UP [$(PROFILE)] $(notdir $(@))";)
 # net_2_0 is needed because monodoc is only compiled in that profile
 MDOC_UP  =$(Q_MDOC_UP) \
-               MONO_PATH="$(topdir)/class/lib/$(DEFAULT_PROFILE)$(PLATFORM_PATH_SEPARATOR)$(topdir)/class/lib/net_2_0$(PLATFORM_PATH_SEPARATOR)$$MONO_PATH" $(RUNTIME) $(topdir)/tools/mdoc/mdoc.exe \
+               MONO_PATH="$(topdir)/class/lib/$(DEFAULT_PROFILE)$(PLATFORM_PATH_SEPARATOR)$(topdir)/class/lib/net_2_0$(PLATFORM_PATH_SEPARATOR)$$MONO_PATH" $(RUNTIME) $(topdir)/class/lib/$(DEFAULT_PROFILE)/mdoc.exe \
                update --delete -o Documentation/en $(the_lib)
 
 doc-update-local: $(the_libdir)/.doc-stamp
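The hunk above repoints the MDOC_UP rule at the mdoc.exe staged in the profile's class/lib directory rather than the copy under tools/mdoc. Expanded by make, the generated command is equivalent to something like the following (assembly name hypothetical, ':' standing in for PLATFORM_PATH_SEPARATOR on Unix):

    MONO_PATH="$topdir/class/lib/$DEFAULT_PROFILE:$topdir/class/lib/net_2_0:$MONO_PATH" \
        mono $topdir/class/lib/$DEFAULT_PROFILE/mdoc.exe \
        update --delete -o Documentation/en MyLibrary.dll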
diff --git a/mcs/class/Mono.Posix/Documentation/en/Mono.Posix/Syscall.xml b/mcs/class/Mono.Posix/Documentation/en/Mono.Posix/Syscall.xml
index ac0aa729e51a7c24e32c6cf4dbd330b2b7ace7be..e71898ffddec8b6c85d40eae4eb29e6009d32fda 100644 (file)
--- a/mcs/class/Mono.Posix/Documentation/en/Mono.Posix/Syscall.xml
+++ b/mcs/class/Mono.Posix/Documentation/en/Mono.Posix/Syscall.xml
@@ -684,32 +684,6 @@ class Test
         <remarks>
           <para>This member is obsolete.  Please use
           <see cref="P:Mono.Unix.UnixEnvironment.MachineName" /> instead.</para>
-          <para>See also GetHostName()</para>
-        </remarks>
-        <altmember cref="P:Mono.Unix.UnixEnvironment.MachineName" />
-        <altmember cref="M:Mono.Unix.Native.Syscall.gethostname" />
-      </Docs>
-    </Member>
-    <Member MemberName="GetHostName">
-      <MemberSignature Language="C#" Value="public static string GetHostName ();" />
-      <MemberSignature Language="ILAsm" Value=".method public static hidebysig string GetHostName() cil managed" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>1.0.5000.0</AssemblyVersion>
-        <AssemblyVersion>2.0.0.0</AssemblyVersion>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.String</ReturnType>
-      </ReturnValue>
-      <Parameters />
-      <Docs>
-        <summary>Retrieves the hostname.</summary>
-        <returns>a <see cref="T:System.String" /></returns>
-        <remarks>
-          <para>This member is obsolete.  Please use
-          <see cref="P:Mono.Unix.UnixEnvironment.MachineName" /> instead.</para>
-          <para>Either GetHostName() should be removed, since gethostname() also exists, or the entire library should be CamelCased per .NET standard.</para>
         </remarks>
         <altmember cref="P:Mono.Unix.UnixEnvironment.MachineName" />
         <altmember cref="M:Mono.Unix.Native.Syscall.gethostname" />
diff --git a/mcs/class/Mono.Security.Win32/Documentation/en/Mono.Security.Cryptography/MD2.xml b/mcs/class/Mono.Security.Win32/Documentation/en/Mono.Security.Cryptography/MD2.xml
deleted file mode 100644 (file)
index f111ba1..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-<Type Name="MD2" FullName="Mono.Security.Cryptography.MD2">
-  <TypeSignature Language="C#" Maintainer="auto" Value="public abstract class MD2 : System.Security.Cryptography.HashAlgorithm" />
-  <AssemblyInfo>
-    <AssemblyName>Mono.Security.Win32</AssemblyName>
-    <AssemblyPublicKey>[00 24 00 00 04 80 00 00 94 00 00 00 06 02 00 00 00 24 00 00 52 53 41 31 00 04 00 00 01 00 01 00 79 15 99 77 D2 D0 3A 8E 6B EA 7A 2E 74 E8 D1 AF CC 93 E8 85 19 74 95 2B B4 80 A1 2C 91 34 47 4D 04 06 24 47 C3 7E 0E 68 C0 80 53 6F CF 3C 3F BE 2F F9 C9 79 CE 99 84 75 E5 06 E8 CE 82 DD 5B 0F 35 0D C1 0E 93 BF 2E EE CF 87 4B 24 77 0C 50 81 DB EA 74 47 FD DA FA 27 7B 22 DE 47 D6 FF EA 44 96 74 A4 F9 FC CF 84 D1 50 69 08 93 80 28 4D BD D3 5F 46 CD FF 12 A1 BD 78 E4 EF 00 65 D0 16 DF]</AssemblyPublicKey>
-    <AssemblyVersion>1.0.5000.0</AssemblyVersion>
-    <AssemblyVersion>2.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <ThreadSafetyStatement>Gtk# is thread aware, but not thread safe; See the <link location="node:gtk-sharp/programming/threads">Gtk# Thread Programming</link> for details.</ThreadSafetyStatement>
-  <Base>
-    <BaseTypeName>System.Security.Cryptography.HashAlgorithm</BaseTypeName>
-  </Base>
-  <Interfaces>
-  </Interfaces>
-  <Docs>
-    <summary>Common base class for all derived MD2 implementations.</summary>
-    <remarks>This class isn't CryptoAPI related. It is included here so that Mono.Security.dll doesn't have any dependencies on assemblies other than mscorlib.dll.</remarks>
-  </Docs>
-  <Members>
-    <Member MemberName=".ctor">
-      <MemberSignature Language="C#" Value="protected MD2 ();" />
-      <MemberType>Constructor</MemberType>
-      <ReturnValue />
-      <Parameters />
-      <Docs>
-        <summary>To be added</summary>
-        <remarks>To be added</remarks>
-      </Docs>
-      <AssemblyInfo>
-        <AssemblyVersion>1.0.5000.0</AssemblyVersion>
-        <AssemblyVersion>2.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-    </Member>
-    <Member MemberName="Create">
-      <MemberSignature Language="C#" Value="public static Mono.Security.Cryptography.MD2 Create ();" />
-      <MemberType>Method</MemberType>
-      <ReturnValue>
-        <ReturnType>Mono.Security.Cryptography.MD2</ReturnType>
-      </ReturnValue>
-      <Parameters />
-      <Docs>
-        <summary>Creates the default derived class.</summary>
-        <returns>a <see cref="T:Mono.Security.Cryptography.MD2" /></returns>
-        <remarks>To be added</remarks>
-      </Docs>
-      <AssemblyInfo>
-        <AssemblyVersion>1.0.5000.0</AssemblyVersion>
-        <AssemblyVersion>2.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-    </Member>
-    <Member MemberName="Create">
-      <MemberSignature Language="C#" Value="public static Mono.Security.Cryptography.MD2 Create (string hashName);" />
-      <MemberType>Method</MemberType>
-      <ReturnValue>
-        <ReturnType>Mono.Security.Cryptography.MD2</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="hashName" Type="System.String" />
-      </Parameters>
-      <Docs>
-        <param name="hashName">Specifies which derived class to create.</param>
-        <summary>Creates a new derived class.</summary>
-        <returns>a <see cref="T:Mono.Security.Cryptography.MD2" /></returns>
-        <remarks>To be added</remarks>
-      </Docs>
-      <AssemblyInfo>
-        <AssemblyVersion>1.0.5000.0</AssemblyVersion>
-        <AssemblyVersion>2.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-    </Member>
-  </Members>
-</Type>
diff --git a/mcs/class/Mono.Security.Win32/Documentation/en/Mono.Security.Cryptography/MD4.xml b/mcs/class/Mono.Security.Win32/Documentation/en/Mono.Security.Cryptography/MD4.xml
deleted file mode 100644 (file)
index 73cb844..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-<Type Name="MD4" FullName="Mono.Security.Cryptography.MD4">
-  <TypeSignature Language="C#" Maintainer="auto" Value="public abstract class MD4 : System.Security.Cryptography.HashAlgorithm" />
-  <AssemblyInfo>
-    <AssemblyName>Mono.Security.Win32</AssemblyName>
-    <AssemblyPublicKey>[00 24 00 00 04 80 00 00 94 00 00 00 06 02 00 00 00 24 00 00 52 53 41 31 00 04 00 00 01 00 01 00 79 15 99 77 D2 D0 3A 8E 6B EA 7A 2E 74 E8 D1 AF CC 93 E8 85 19 74 95 2B B4 80 A1 2C 91 34 47 4D 04 06 24 47 C3 7E 0E 68 C0 80 53 6F CF 3C 3F BE 2F F9 C9 79 CE 99 84 75 E5 06 E8 CE 82 DD 5B 0F 35 0D C1 0E 93 BF 2E EE CF 87 4B 24 77 0C 50 81 DB EA 74 47 FD DA FA 27 7B 22 DE 47 D6 FF EA 44 96 74 A4 F9 FC CF 84 D1 50 69 08 93 80 28 4D BD D3 5F 46 CD FF 12 A1 BD 78 E4 EF 00 65 D0 16 DF]</AssemblyPublicKey>
-    <AssemblyVersion>1.0.5000.0</AssemblyVersion>
-    <AssemblyVersion>2.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <ThreadSafetyStatement>Gtk# is thread aware, but not thread safe; See the <link location="node:gtk-sharp/programming/threads">Gtk# Thread Programming</link> for details.</ThreadSafetyStatement>
-  <Base>
-    <BaseTypeName>System.Security.Cryptography.HashAlgorithm</BaseTypeName>
-  </Base>
-  <Interfaces>
-  </Interfaces>
-  <Docs>
-    <summary>Common base class for all derived MD4 implementations. </summary>
-    <remarks>This class isn't CryptoAPI related. It is included here so that Mono.Security.dll doesn't have any dependencies on assemblies other than mscorlib.dll.</remarks>
-  </Docs>
-  <Members>
-    <Member MemberName=".ctor">
-      <MemberSignature Language="C#" Value="protected MD4 ();" />
-      <MemberType>Constructor</MemberType>
-      <ReturnValue />
-      <Parameters />
-      <Docs>
-        <summary>To be added</summary>
-        <remarks>To be added</remarks>
-      </Docs>
-      <AssemblyInfo>
-        <AssemblyVersion>1.0.5000.0</AssemblyVersion>
-        <AssemblyVersion>2.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-    </Member>
-    <Member MemberName="Create">
-      <MemberSignature Language="C#" Value="public static Mono.Security.Cryptography.MD4 Create ();" />
-      <MemberType>Method</MemberType>
-      <ReturnValue>
-        <ReturnType>Mono.Security.Cryptography.MD4</ReturnType>
-      </ReturnValue>
-      <Parameters />
-      <Docs>
-        <summary>Creates the default derived class.</summary>
-        <returns>a <see cref="T:Mono.Security.Cryptography.MD4" /></returns>
-        <remarks>To be added</remarks>
-      </Docs>
-      <AssemblyInfo>
-        <AssemblyVersion>1.0.5000.0</AssemblyVersion>
-        <AssemblyVersion>2.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-    </Member>
-    <Member MemberName="Create">
-      <MemberSignature Language="C#" Value="public static Mono.Security.Cryptography.MD4 Create (string hashName);" />
-      <MemberType>Method</MemberType>
-      <ReturnValue>
-        <ReturnType>Mono.Security.Cryptography.MD4</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="hashName" Type="System.String" />
-      </Parameters>
-      <Docs>
-        <param name="hashName">Specifies which derived class to create.</param>
-        <summary>Creates a new derived class.</summary>
-        <returns>a <see cref="T:Mono.Security.Cryptography.MD4" /></returns>
-        <remarks>To be added</remarks>
-      </Docs>
-      <AssemblyInfo>
-        <AssemblyVersion>1.0.5000.0</AssemblyVersion>
-        <AssemblyVersion>2.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-    </Member>
-  </Members>
-</Type>
diff --git a/mcs/class/System/Documentation/en/System.Threading/SemaphoreFullException.xml b/mcs/class/System/Documentation/en/System.Threading/SemaphoreFullException.xml
deleted file mode 100644 (file)
index 0da2482..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-<Type Name="SemaphoreFullException" FullName="System.Threading.SemaphoreFullException">
-  <TypeSignature Language="C#" Value="public class SemaphoreFullException : SystemException" />
-  <AssemblyInfo>
-    <AssemblyName>System</AssemblyName>
-    <AssemblyVersion>2.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <Base>
-    <BaseTypeName>System.SystemException</BaseTypeName>
-  </Base>
-  <Interfaces />
-  <Attributes>
-    <Attribute>
-      <AttributeName>System.Runtime.InteropServices.ComVisible(false)</AttributeName>
-    </Attribute>
-  </Attributes>
-  <Docs>
-    <summary>To be added.</summary>
-    <remarks>To be added.</remarks>
-    <since version=".NET 2.0" />
-  </Docs>
-  <Members>
-    <Member MemberName=".ctor">
-      <MemberSignature Language="C#" Value="public SemaphoreFullException ();" />
-      <MemberType>Constructor</MemberType>
-      <Parameters />
-      <Docs>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-        <since version=".NET 2.0" />
-      </Docs>
-      <AssemblyInfo>
-        <AssemblyVersion>2.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-    </Member>
-    <Member MemberName=".ctor">
-      <MemberSignature Language="C#" Value="public SemaphoreFullException (string message);" />
-      <MemberType>Constructor</MemberType>
-      <Parameters>
-        <Parameter Name="message" Type="System.String" />
-      </Parameters>
-      <Docs>
-        <param name="message">To be added.</param>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-        <since version=".NET 2.0" />
-      </Docs>
-      <AssemblyInfo>
-        <AssemblyVersion>2.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-    </Member>
-    <Member MemberName=".ctor">
-      <MemberSignature Language="C#" Value="protected SemaphoreFullException (System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context);" />
-      <MemberType>Constructor</MemberType>
-      <Parameters>
-        <Parameter Name="info" Type="System.Runtime.Serialization.SerializationInfo" />
-        <Parameter Name="context" Type="System.Runtime.Serialization.StreamingContext" />
-      </Parameters>
-      <Docs>
-        <param name="info">To be added.</param>
-        <param name="context">To be added.</param>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-        <since version=".NET 2.0" />
-      </Docs>
-      <AssemblyInfo>
-        <AssemblyVersion>2.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-    </Member>
-    <Member MemberName=".ctor">
-      <MemberSignature Language="C#" Value="public SemaphoreFullException (string message, Exception innerException);" />
-      <MemberType>Constructor</MemberType>
-      <Parameters>
-        <Parameter Name="message" Type="System.String" />
-        <Parameter Name="innerException" Type="System.Exception" />
-      </Parameters>
-      <Docs>
-        <param name="message">To be added.</param>
-        <param name="innerException">To be added.</param>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-        <since version=".NET 2.0" />
-      </Docs>
-      <AssemblyInfo>
-        <AssemblyVersion>2.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-    </Member>
-  </Members>
-</Type>
diff --git a/mcs/class/corlib/Documentation/en/System/Action.xml b/mcs/class/corlib/Documentation/en/System/Action.xml
deleted file mode 100644 (file)
index 724af18..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-<Type Name="Action" FullName="System.Action">
-  <TypeSignature Language="C#" Value="public delegate void Action();" />
-  <AssemblyInfo>
-    <AssemblyName>mscorlib</AssemblyName>
-    <AssemblyVersion>4.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <Base>
-    <BaseTypeName>System.Delegate</BaseTypeName>
-  </Base>
-  <Attributes>
-    <Attribute>
-      <AttributeName>System.Runtime.CompilerServices.TypeForwardedFrom("System.Core, Version=3.5.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")</AttributeName>
-    </Attribute>
-  </Attributes>
-  <Parameters />
-  <ReturnValue>
-    <ReturnType>System.Void</ReturnType>
-  </ReturnValue>
-  <Docs>
-    <summary>To be added.</summary>
-    <remarks>To be added.</remarks>
-  </Docs>
-</Type>
diff --git a/mcs/class/corlib/Documentation/en/System/Action`1.xml b/mcs/class/corlib/Documentation/en/System/Action`1.xml
deleted file mode 100644 (file)
index accc1e8..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-<Type Name="Action&lt;T&gt;" FullName="System.Action&lt;T&gt;">
-  <TypeSignature Language="C#" Value="public delegate void Action&lt;in T&gt;(T obj);" />
-  <AssemblyInfo>
-    <AssemblyName>mscorlib</AssemblyName>
-    <AssemblyVersion>2.0.0.0</AssemblyVersion>
-    <AssemblyVersion>4.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <TypeParameters>
-    <TypeParameter Name="T">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-  </TypeParameters>
-  <Base>
-    <BaseTypeName>System.Delegate</BaseTypeName>
-  </Base>
-  <Parameters>
-    <Parameter Name="obj" Type="T" />
-  </Parameters>
-  <ReturnValue>
-    <ReturnType>System.Void</ReturnType>
-  </ReturnValue>
-  <Docs>
-    <typeparam name="T">The type of the argument passed to the method.</typeparam>
-    <param name="obj">The object on which to perform an action.</param>
-    <summary>A delegate to a method that takes one parameter and returns no value.</summary>
-    <remarks>Since the compiler can infer the types, you do not need to provide them explicitly. This delegate is typically used with the <see cref="M:System.Array.ForEach" /> method or the ForEach method of <see cref="T:System.Collections.Generic.List`1" />.
-
-<example><code lang="C#">
-//
-// Simple "echo" implementation
-//
-using System;
-
-class X {
-
-       static void Main (string [] args)
-       {
-               Array.ForEach (args, print);
-               Console.WriteLine ();
-       }
-
-       static void print (string a)
-       {
-               Console.Write (a);
-               Console.Write (" ");
-       }
-}
-  </code></example></remarks>
-    <since version=".NET 2.0" />
-  </Docs>
-</Type>
diff --git a/mcs/class/corlib/Documentation/en/System/Action`2.xml b/mcs/class/corlib/Documentation/en/System/Action`2.xml
deleted file mode 100644 (file)
index 8241ea6..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-<Type Name="Action&lt;T1,T2&gt;" FullName="System.Action&lt;T1,T2&gt;">
-  <TypeSignature Language="C#" Value="public delegate void Action&lt;in T1,in T2&gt;(T1 arg1, T2 arg2);" />
-  <AssemblyInfo>
-    <AssemblyName>mscorlib</AssemblyName>
-    <AssemblyVersion>4.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <TypeParameters>
-    <TypeParameter Name="T1">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="T2">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-  </TypeParameters>
-  <Base>
-    <BaseTypeName>System.Delegate</BaseTypeName>
-  </Base>
-  <Attributes>
-    <Attribute>
-      <AttributeName>System.Runtime.CompilerServices.TypeForwardedFrom("System.Core, Version=3.5.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")</AttributeName>
-    </Attribute>
-  </Attributes>
-  <Parameters>
-    <Parameter Name="arg1" Type="T1" />
-    <Parameter Name="arg2" Type="T2" />
-  </Parameters>
-  <ReturnValue>
-    <ReturnType>System.Void</ReturnType>
-  </ReturnValue>
-  <Docs>
-    <typeparam name="T1">To be added.</typeparam>
-    <typeparam name="T2">To be added.</typeparam>
-    <param name="arg1">To be added.</param>
-    <param name="arg2">To be added.</param>
-    <summary>To be added.</summary>
-    <remarks>To be added.</remarks>
-  </Docs>
-</Type>
diff --git a/mcs/class/corlib/Documentation/en/System/Action`3.xml b/mcs/class/corlib/Documentation/en/System/Action`3.xml
deleted file mode 100644 (file)
index b535b47..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-<Type Name="Action&lt;T1,T2,T3&gt;" FullName="System.Action&lt;T1,T2,T3&gt;">
-  <TypeSignature Language="C#" Value="public delegate void Action&lt;in T1,in T2,in T3&gt;(T1 arg1, T2 arg2, T3 arg3);" />
-  <AssemblyInfo>
-    <AssemblyName>mscorlib</AssemblyName>
-    <AssemblyVersion>4.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <TypeParameters>
-    <TypeParameter Name="T1">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="T2">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="T3">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-  </TypeParameters>
-  <Base>
-    <BaseTypeName>System.Delegate</BaseTypeName>
-  </Base>
-  <Attributes>
-    <Attribute>
-      <AttributeName>System.Runtime.CompilerServices.TypeForwardedFrom("System.Core, Version=3.5.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")</AttributeName>
-    </Attribute>
-  </Attributes>
-  <Parameters>
-    <Parameter Name="arg1" Type="T1" />
-    <Parameter Name="arg2" Type="T2" />
-    <Parameter Name="arg3" Type="T3" />
-  </Parameters>
-  <ReturnValue>
-    <ReturnType>System.Void</ReturnType>
-  </ReturnValue>
-  <Docs>
-    <typeparam name="T1">To be added.</typeparam>
-    <typeparam name="T2">To be added.</typeparam>
-    <typeparam name="T3">To be added.</typeparam>
-    <param name="arg1">To be added.</param>
-    <param name="arg2">To be added.</param>
-    <param name="arg3">To be added.</param>
-    <summary>To be added.</summary>
-    <remarks>To be added.</remarks>
-  </Docs>
-</Type>
diff --git a/mcs/class/corlib/Documentation/en/System/Action`4.xml b/mcs/class/corlib/Documentation/en/System/Action`4.xml
deleted file mode 100644 (file)
index 0c0ba7b..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-<Type Name="Action&lt;T1,T2,T3,T4&gt;" FullName="System.Action&lt;T1,T2,T3,T4&gt;">
-  <TypeSignature Language="C#" Value="public delegate void Action&lt;in T1,in T2,in T3,in T4&gt;(T1 arg1, T2 arg2, T3 arg3, T4 arg4);" />
-  <AssemblyInfo>
-    <AssemblyName>mscorlib</AssemblyName>
-    <AssemblyVersion>4.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <TypeParameters>
-    <TypeParameter Name="T1">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="T2">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="T3">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="T4">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-  </TypeParameters>
-  <Base>
-    <BaseTypeName>System.Delegate</BaseTypeName>
-  </Base>
-  <Attributes>
-    <Attribute>
-      <AttributeName>System.Runtime.CompilerServices.TypeForwardedFrom("System.Core, Version=3.5.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")</AttributeName>
-    </Attribute>
-  </Attributes>
-  <Parameters>
-    <Parameter Name="arg1" Type="T1" />
-    <Parameter Name="arg2" Type="T2" />
-    <Parameter Name="arg3" Type="T3" />
-    <Parameter Name="arg4" Type="T4" />
-  </Parameters>
-  <ReturnValue>
-    <ReturnType>System.Void</ReturnType>
-  </ReturnValue>
-  <Docs>
-    <typeparam name="T1">To be added.</typeparam>
-    <typeparam name="T2">To be added.</typeparam>
-    <typeparam name="T3">To be added.</typeparam>
-    <typeparam name="T4">To be added.</typeparam>
-    <param name="arg1">To be added.</param>
-    <param name="arg2">To be added.</param>
-    <param name="arg3">To be added.</param>
-    <param name="arg4">To be added.</param>
-    <summary>To be added.</summary>
-    <remarks>To be added.</remarks>
-  </Docs>
-</Type>
diff --git a/mcs/class/corlib/Documentation/en/System/Func`1.xml b/mcs/class/corlib/Documentation/en/System/Func`1.xml
deleted file mode 100644 (file)
index a83dfa2..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-<Type Name="Func&lt;TResult&gt;" FullName="System.Func&lt;TResult&gt;">
-  <TypeSignature Language="C#" Value="public delegate TResult Func&lt;out TResult&gt;();" />
-  <AssemblyInfo>
-    <AssemblyName>mscorlib</AssemblyName>
-    <AssemblyVersion>4.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <TypeParameters>
-    <TypeParameter Name="TResult">
-      <Constraints>
-        <ParameterAttribute>Covariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-  </TypeParameters>
-  <Base>
-    <BaseTypeName>System.Delegate</BaseTypeName>
-  </Base>
-  <Attributes>
-    <Attribute>
-      <AttributeName>System.Runtime.CompilerServices.TypeForwardedFrom("System.Core, Version=3.5.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")</AttributeName>
-    </Attribute>
-  </Attributes>
-  <Parameters />
-  <ReturnValue>
-    <ReturnType>TResult</ReturnType>
-  </ReturnValue>
-  <Docs>
-    <typeparam name="TResult">To be added.</typeparam>
-    <summary>To be added.</summary>
-    <value>To be added.</value>
-    <remarks>To be added.</remarks>
-  </Docs>
-</Type>
diff --git a/mcs/class/corlib/Documentation/en/System/Func`2.xml b/mcs/class/corlib/Documentation/en/System/Func`2.xml
deleted file mode 100644 (file)
index 54d8e04..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-<Type Name="Func&lt;T,TResult&gt;" FullName="System.Func&lt;T,TResult&gt;">
-  <TypeSignature Language="C#" Value="public delegate TResult Func&lt;in T,out TResult&gt;(T arg1);" />
-  <AssemblyInfo>
-    <AssemblyName>mscorlib</AssemblyName>
-    <AssemblyVersion>4.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <TypeParameters>
-    <TypeParameter Name="T">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="TResult">
-      <Constraints>
-        <ParameterAttribute>Covariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-  </TypeParameters>
-  <Base>
-    <BaseTypeName>System.Delegate</BaseTypeName>
-  </Base>
-  <Attributes>
-    <Attribute>
-      <AttributeName>System.Runtime.CompilerServices.TypeForwardedFrom("System.Core, Version=3.5.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")</AttributeName>
-    </Attribute>
-  </Attributes>
-  <Parameters>
-    <Parameter Name="arg1" Type="T" />
-  </Parameters>
-  <ReturnValue>
-    <ReturnType>TResult</ReturnType>
-  </ReturnValue>
-  <Docs>
-    <typeparam name="T">To be added.</typeparam>
-    <typeparam name="TResult">To be added.</typeparam>
-    <param name="arg1">To be added.</param>
-    <summary>To be added.</summary>
-    <value>To be added.</value>
-    <remarks>To be added.</remarks>
-  </Docs>
-</Type>
diff --git a/mcs/class/corlib/Documentation/en/System/Func`3.xml b/mcs/class/corlib/Documentation/en/System/Func`3.xml
deleted file mode 100644 (file)
index 98b1f6b..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-<Type Name="Func&lt;T1,T2,TResult&gt;" FullName="System.Func&lt;T1,T2,TResult&gt;">
-  <TypeSignature Language="C#" Value="public delegate TResult Func&lt;in T1,in T2,out TResult&gt;(T1 arg1, T2 arg2);" />
-  <AssemblyInfo>
-    <AssemblyName>mscorlib</AssemblyName>
-    <AssemblyVersion>4.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <TypeParameters>
-    <TypeParameter Name="T1">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="T2">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="TResult">
-      <Constraints>
-        <ParameterAttribute>Covariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-  </TypeParameters>
-  <Base>
-    <BaseTypeName>System.Delegate</BaseTypeName>
-  </Base>
-  <Attributes>
-    <Attribute>
-      <AttributeName>System.Runtime.CompilerServices.TypeForwardedFrom("System.Core, Version=3.5.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")</AttributeName>
-    </Attribute>
-  </Attributes>
-  <Parameters>
-    <Parameter Name="arg1" Type="T1" />
-    <Parameter Name="arg2" Type="T2" />
-  </Parameters>
-  <ReturnValue>
-    <ReturnType>TResult</ReturnType>
-  </ReturnValue>
-  <Docs>
-    <typeparam name="T1">To be added.</typeparam>
-    <typeparam name="T2">To be added.</typeparam>
-    <typeparam name="TResult">To be added.</typeparam>
-    <param name="arg1">To be added.</param>
-    <param name="arg2">To be added.</param>
-    <summary>To be added.</summary>
-    <value>To be added.</value>
-    <remarks>To be added.</remarks>
-  </Docs>
-</Type>
diff --git a/mcs/class/corlib/Documentation/en/System/Func`4.xml b/mcs/class/corlib/Documentation/en/System/Func`4.xml
deleted file mode 100644 (file)
index e230df7..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-<Type Name="Func&lt;T1,T2,T3,TResult&gt;" FullName="System.Func&lt;T1,T2,T3,TResult&gt;">
-  <TypeSignature Language="C#" Value="public delegate TResult Func&lt;in T1,in T2,in T3,out TResult&gt;(T1 arg1, T2 arg2, T3 arg3);" />
-  <AssemblyInfo>
-    <AssemblyName>mscorlib</AssemblyName>
-    <AssemblyVersion>4.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <TypeParameters>
-    <TypeParameter Name="T1">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="T2">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="T3">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="TResult">
-      <Constraints>
-        <ParameterAttribute>Covariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-  </TypeParameters>
-  <Base>
-    <BaseTypeName>System.Delegate</BaseTypeName>
-  </Base>
-  <Attributes>
-    <Attribute>
-      <AttributeName>System.Runtime.CompilerServices.TypeForwardedFrom("System.Core, Version=3.5.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")</AttributeName>
-    </Attribute>
-  </Attributes>
-  <Parameters>
-    <Parameter Name="arg1" Type="T1" />
-    <Parameter Name="arg2" Type="T2" />
-    <Parameter Name="arg3" Type="T3" />
-  </Parameters>
-  <ReturnValue>
-    <ReturnType>TResult</ReturnType>
-  </ReturnValue>
-  <Docs>
-    <typeparam name="T1">To be added.</typeparam>
-    <typeparam name="T2">To be added.</typeparam>
-    <typeparam name="T3">To be added.</typeparam>
-    <typeparam name="TResult">To be added.</typeparam>
-    <param name="arg1">To be added.</param>
-    <param name="arg2">To be added.</param>
-    <param name="arg3">To be added.</param>
-    <summary>To be added.</summary>
-    <value>To be added.</value>
-    <remarks>To be added.</remarks>
-  </Docs>
-</Type>
diff --git a/mcs/class/corlib/Documentation/en/System/Func`5.xml b/mcs/class/corlib/Documentation/en/System/Func`5.xml
deleted file mode 100644 (file)
index dcba2aa..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-<Type Name="Func&lt;T1,T2,T3,T4,TResult&gt;" FullName="System.Func&lt;T1,T2,T3,T4,TResult&gt;">
-  <TypeSignature Language="C#" Value="public delegate TResult Func&lt;in T1,in T2,in T3,in T4,out TResult&gt;(T1 arg1, T2 arg2, T3 arg3, T4 arg4);" />
-  <AssemblyInfo>
-    <AssemblyName>mscorlib</AssemblyName>
-    <AssemblyVersion>4.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <TypeParameters>
-    <TypeParameter Name="T1">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="T2">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="T3">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="T4">
-      <Constraints>
-        <ParameterAttribute>Contravariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-    <TypeParameter Name="TResult">
-      <Constraints>
-        <ParameterAttribute>Covariant</ParameterAttribute>
-      </Constraints>
-    </TypeParameter>
-  </TypeParameters>
-  <Base>
-    <BaseTypeName>System.Delegate</BaseTypeName>
-  </Base>
-  <Attributes>
-    <Attribute>
-      <AttributeName>System.Runtime.CompilerServices.TypeForwardedFrom("System.Core, Version=3.5.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")</AttributeName>
-    </Attribute>
-  </Attributes>
-  <Parameters>
-    <Parameter Name="arg1" Type="T1" />
-    <Parameter Name="arg2" Type="T2" />
-    <Parameter Name="arg3" Type="T3" />
-    <Parameter Name="arg4" Type="T4" />
-  </Parameters>
-  <ReturnValue>
-    <ReturnType>TResult</ReturnType>
-  </ReturnValue>
-  <Docs>
-    <typeparam name="T1">To be added.</typeparam>
-    <typeparam name="T2">To be added.</typeparam>
-    <typeparam name="T3">To be added.</typeparam>
-    <typeparam name="T4">To be added.</typeparam>
-    <typeparam name="TResult">To be added.</typeparam>
-    <param name="arg1">To be added.</param>
-    <param name="arg2">To be added.</param>
-    <param name="arg3">To be added.</param>
-    <param name="arg4">To be added.</param>
-    <summary>To be added.</summary>
-    <value>To be added.</value>
-    <remarks>To be added.</remarks>
-  </Docs>
-</Type>
diff --git a/mcs/class/corlib/Documentation/en/System/InvalidTimeZoneException.xml b/mcs/class/corlib/Documentation/en/System/InvalidTimeZoneException.xml
deleted file mode 100644 (file)
index f3783b5..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-<Type Name="InvalidTimeZoneException" FullName="System.InvalidTimeZoneException">
-  <TypeSignature Language="C#" Value="public class InvalidTimeZoneException : Exception" />
-  <AssemblyInfo>
-    <AssemblyName>mscorlib</AssemblyName>
-    <AssemblyVersion>4.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <Base>
-    <BaseTypeName>System.Exception</BaseTypeName>
-  </Base>
-  <Interfaces />
-  <Attributes>
-    <Attribute>
-      <AttributeName>System.Runtime.CompilerServices.TypeForwardedFrom("System.Core, Version=3.5.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")</AttributeName>
-    </Attribute>
-  </Attributes>
-  <Docs>
-    <summary>To be added.</summary>
-    <remarks>To be added.</remarks>
-  </Docs>
-  <Members>
-    <Member MemberName=".ctor">
-      <MemberSignature Language="C#" Value="public InvalidTimeZoneException ();" />
-      <MemberType>Constructor</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <Parameters />
-      <Docs>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName=".ctor">
-      <MemberSignature Language="C#" Value="public InvalidTimeZoneException (string message);" />
-      <MemberType>Constructor</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <Parameters>
-        <Parameter Name="message" Type="System.String" />
-      </Parameters>
-      <Docs>
-        <param name="message">To be added.</param>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName=".ctor">
-      <MemberSignature Language="C#" Value="protected InvalidTimeZoneException (System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext sc);" />
-      <MemberType>Constructor</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <Parameters>
-        <Parameter Name="info" Type="System.Runtime.Serialization.SerializationInfo" />
-        <Parameter Name="sc" Type="System.Runtime.Serialization.StreamingContext" />
-      </Parameters>
-      <Docs>
-        <param name="info">To be added.</param>
-        <param name="sc">To be added.</param>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName=".ctor">
-      <MemberSignature Language="C#" Value="public InvalidTimeZoneException (string message, Exception e);" />
-      <MemberType>Constructor</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <Parameters>
-        <Parameter Name="message" Type="System.String" />
-        <Parameter Name="e" Type="System.Exception" />
-      </Parameters>
-      <Docs>
-        <param name="message">To be added.</param>
-        <param name="e">To be added.</param>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-  </Members>
-</Type>
diff --git a/mcs/class/corlib/Documentation/en/System/TimeZoneInfo+AdjustmentRule.xml b/mcs/class/corlib/Documentation/en/System/TimeZoneInfo+AdjustmentRule.xml
deleted file mode 100644 (file)
index 03cdb17..0000000
+++ /dev/null
@@ -1,207 +0,0 @@
-<Type Name="TimeZoneInfo+AdjustmentRule" FullName="System.TimeZoneInfo+AdjustmentRule">
-  <TypeSignature Language="C#" Value="public sealed class TimeZoneInfo.AdjustmentRule : IEquatable&lt;TimeZoneInfo.AdjustmentRule&gt;, System.Runtime.Serialization.IDeserializationCallback, System.Runtime.Serialization.ISerializable" />
-  <AssemblyInfo>
-    <AssemblyName>mscorlib</AssemblyName>
-    <AssemblyVersion>4.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <Base>
-    <BaseTypeName>System.Object</BaseTypeName>
-  </Base>
-  <Interfaces>
-    <Interface>
-      <InterfaceName>System.IEquatable&lt;System.TimeZoneInfo+AdjustmentRule&gt;</InterfaceName>
-    </Interface>
-    <Interface>
-      <InterfaceName>System.Runtime.Serialization.IDeserializationCallback</InterfaceName>
-    </Interface>
-    <Interface>
-      <InterfaceName>System.Runtime.Serialization.ISerializable</InterfaceName>
-    </Interface>
-  </Interfaces>
-  <Attributes>
-    <Attribute>
-      <AttributeName>System.Runtime.CompilerServices.TypeForwardedFrom("System.Core, Version=3.5.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")</AttributeName>
-    </Attribute>
-  </Attributes>
-  <Docs>
-    <summary>To be added.</summary>
-    <remarks>To be added.</remarks>
-  </Docs>
-  <Members>
-    <Member MemberName="CreateAdjustmentRule">
-      <MemberSignature Language="C#" Value="public static TimeZoneInfo.AdjustmentRule CreateAdjustmentRule (DateTime dateStart, DateTime dateEnd, TimeSpan daylightDelta, TimeZoneInfo.TransitionTime daylightTransitionStart, TimeZoneInfo.TransitionTime daylightTransitionEnd);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeZoneInfo+AdjustmentRule</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateStart" Type="System.DateTime" />
-        <Parameter Name="dateEnd" Type="System.DateTime" />
-        <Parameter Name="daylightDelta" Type="System.TimeSpan" />
-        <Parameter Name="daylightTransitionStart" Type="System.TimeZoneInfo+TransitionTime" />
-        <Parameter Name="daylightTransitionEnd" Type="System.TimeZoneInfo+TransitionTime" />
-      </Parameters>
-      <Docs>
-        <param name="dateStart">To be added.</param>
-        <param name="dateEnd">To be added.</param>
-        <param name="daylightDelta">To be added.</param>
-        <param name="daylightTransitionStart">To be added.</param>
-        <param name="daylightTransitionEnd">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="DateEnd">
-      <MemberSignature Language="C#" Value="public DateTime DateEnd { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.DateTime</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="DateStart">
-      <MemberSignature Language="C#" Value="public DateTime DateStart { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.DateTime</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="DaylightDelta">
-      <MemberSignature Language="C#" Value="public TimeSpan DaylightDelta { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeSpan</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="DaylightTransitionEnd">
-      <MemberSignature Language="C#" Value="public TimeZoneInfo.TransitionTime DaylightTransitionEnd { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeZoneInfo+TransitionTime</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="DaylightTransitionStart">
-      <MemberSignature Language="C#" Value="public TimeZoneInfo.TransitionTime DaylightTransitionStart { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeZoneInfo+TransitionTime</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="Equals">
-      <MemberSignature Language="C#" Value="public bool Equals (TimeZoneInfo.AdjustmentRule other);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Boolean</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="other" Type="System.TimeZoneInfo+AdjustmentRule" />
-      </Parameters>
-      <Docs>
-        <param name="other">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="GetHashCode">
-      <MemberSignature Language="C#" Value="public override int GetHashCode ();" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Int32</ReturnType>
-      </ReturnValue>
-      <Parameters />
-      <Docs>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="System.Runtime.Serialization.IDeserializationCallback.OnDeserialization">
-      <MemberSignature Language="C#" Value="void IDeserializationCallback.OnDeserialization (object sender);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Void</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="sender" Type="System.Object" />
-      </Parameters>
-      <Docs>
-        <param name="sender">To be added.</param>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="System.Runtime.Serialization.ISerializable.GetObjectData">
-      <MemberSignature Language="C#" Value="void ISerializable.GetObjectData (System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Void</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="info" Type="System.Runtime.Serialization.SerializationInfo" />
-        <Parameter Name="context" Type="System.Runtime.Serialization.StreamingContext" />
-      </Parameters>
-      <Docs>
-        <param name="info">To be added.</param>
-        <param name="context">To be added.</param>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-  </Members>
-</Type>
diff --git a/mcs/class/corlib/Documentation/en/System/TimeZoneInfo+TransitionTime.xml b/mcs/class/corlib/Documentation/en/System/TimeZoneInfo+TransitionTime.xml
deleted file mode 100644 (file)
index 677456c..0000000
+++ /dev/null
@@ -1,304 +0,0 @@
-<Type Name="TimeZoneInfo+TransitionTime" FullName="System.TimeZoneInfo+TransitionTime">
-  <TypeSignature Language="C#" Value="public struct TimeZoneInfo.TransitionTime : IEquatable&lt;TimeZoneInfo.TransitionTime&gt;, System.Runtime.Serialization.IDeserializationCallback, System.Runtime.Serialization.ISerializable" />
-  <AssemblyInfo>
-    <AssemblyName>mscorlib</AssemblyName>
-    <AssemblyVersion>4.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <Base>
-    <BaseTypeName>System.ValueType</BaseTypeName>
-  </Base>
-  <Interfaces>
-    <Interface>
-      <InterfaceName>System.IEquatable&lt;System.TimeZoneInfo+TransitionTime&gt;</InterfaceName>
-    </Interface>
-    <Interface>
-      <InterfaceName>System.Runtime.Serialization.IDeserializationCallback</InterfaceName>
-    </Interface>
-    <Interface>
-      <InterfaceName>System.Runtime.Serialization.ISerializable</InterfaceName>
-    </Interface>
-  </Interfaces>
-  <Attributes>
-    <Attribute>
-      <AttributeName>System.Runtime.CompilerServices.TypeForwardedFrom("System.Core, Version=3.5.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")</AttributeName>
-    </Attribute>
-  </Attributes>
-  <Docs>
-    <summary>To be added.</summary>
-    <remarks>To be added.</remarks>
-  </Docs>
-  <Members>
-    <Member MemberName="CreateFixedDateRule">
-      <MemberSignature Language="C#" Value="public static TimeZoneInfo.TransitionTime CreateFixedDateRule (DateTime timeOfDay, int month, int day);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeZoneInfo+TransitionTime</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="timeOfDay" Type="System.DateTime" />
-        <Parameter Name="month" Type="System.Int32" />
-        <Parameter Name="day" Type="System.Int32" />
-      </Parameters>
-      <Docs>
-        <param name="timeOfDay">To be added.</param>
-        <param name="month">To be added.</param>
-        <param name="day">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="CreateFloatingDateRule">
-      <MemberSignature Language="C#" Value="public static TimeZoneInfo.TransitionTime CreateFloatingDateRule (DateTime timeOfDay, int month, int week, DayOfWeek dayOfWeek);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeZoneInfo+TransitionTime</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="timeOfDay" Type="System.DateTime" />
-        <Parameter Name="month" Type="System.Int32" />
-        <Parameter Name="week" Type="System.Int32" />
-        <Parameter Name="dayOfWeek" Type="System.DayOfWeek" />
-      </Parameters>
-      <Docs>
-        <param name="timeOfDay">To be added.</param>
-        <param name="month">To be added.</param>
-        <param name="week">To be added.</param>
-        <param name="dayOfWeek">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="Day">
-      <MemberSignature Language="C#" Value="public int Day { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Int32</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="DayOfWeek">
-      <MemberSignature Language="C#" Value="public DayOfWeek DayOfWeek { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.DayOfWeek</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="Equals">
-      <MemberSignature Language="C#" Value="public override bool Equals (object other);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Boolean</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="other" Type="System.Object" />
-      </Parameters>
-      <Docs>
-        <param name="other">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="Equals">
-      <MemberSignature Language="C#" Value="public bool Equals (TimeZoneInfo.TransitionTime other);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Boolean</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="other" Type="System.TimeZoneInfo+TransitionTime" />
-      </Parameters>
-      <Docs>
-        <param name="other">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="GetHashCode">
-      <MemberSignature Language="C#" Value="public override int GetHashCode ();" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Int32</ReturnType>
-      </ReturnValue>
-      <Parameters />
-      <Docs>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="IsFixedDateRule">
-      <MemberSignature Language="C#" Value="public bool IsFixedDateRule { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Boolean</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="Month">
-      <MemberSignature Language="C#" Value="public int Month { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Int32</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="op_Equality">
-      <MemberSignature Language="C#" Value="public static bool op_Equality (TimeZoneInfo.TransitionTime t1, TimeZoneInfo.TransitionTime t2);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Boolean</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="t1" Type="System.TimeZoneInfo+TransitionTime" />
-        <Parameter Name="t2" Type="System.TimeZoneInfo+TransitionTime" />
-      </Parameters>
-      <Docs>
-        <param name="t1">To be added.</param>
-        <param name="t2">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="op_Inequality">
-      <MemberSignature Language="C#" Value="public static bool op_Inequality (TimeZoneInfo.TransitionTime t1, TimeZoneInfo.TransitionTime t2);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Boolean</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="t1" Type="System.TimeZoneInfo+TransitionTime" />
-        <Parameter Name="t2" Type="System.TimeZoneInfo+TransitionTime" />
-      </Parameters>
-      <Docs>
-        <param name="t1">To be added.</param>
-        <param name="t2">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="System.Runtime.Serialization.IDeserializationCallback.OnDeserialization">
-      <MemberSignature Language="C#" Value="void IDeserializationCallback.OnDeserialization (object sender);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Void</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="sender" Type="System.Object" />
-      </Parameters>
-      <Docs>
-        <param name="sender">To be added.</param>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="System.Runtime.Serialization.ISerializable.GetObjectData">
-      <MemberSignature Language="C#" Value="void ISerializable.GetObjectData (System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Void</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="info" Type="System.Runtime.Serialization.SerializationInfo" />
-        <Parameter Name="context" Type="System.Runtime.Serialization.StreamingContext" />
-      </Parameters>
-      <Docs>
-        <param name="info">To be added.</param>
-        <param name="context">To be added.</param>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="TimeOfDay">
-      <MemberSignature Language="C#" Value="public DateTime TimeOfDay { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.DateTime</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="Week">
-      <MemberSignature Language="C#" Value="public int Week { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Int32</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-  </Members>
-</Type>
diff --git a/mcs/class/corlib/Documentation/en/System/TimeZoneInfo.xml b/mcs/class/corlib/Documentation/en/System/TimeZoneInfo.xml
deleted file mode 100644 (file)
index 1fdf45c..0000000
+++ /dev/null
@@ -1,808 +0,0 @@
-<Type Name="TimeZoneInfo" FullName="System.TimeZoneInfo">
-  <TypeSignature Language="C#" Value="public sealed class TimeZoneInfo : IEquatable&lt;TimeZoneInfo&gt;, System.Runtime.Serialization.IDeserializationCallback, System.Runtime.Serialization.ISerializable" />
-  <AssemblyInfo>
-    <AssemblyName>mscorlib</AssemblyName>
-    <AssemblyVersion>4.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <Base>
-    <BaseTypeName>System.Object</BaseTypeName>
-  </Base>
-  <Interfaces>
-    <Interface>
-      <InterfaceName>System.IEquatable&lt;System.TimeZoneInfo&gt;</InterfaceName>
-    </Interface>
-    <Interface>
-      <InterfaceName>System.Runtime.Serialization.IDeserializationCallback</InterfaceName>
-    </Interface>
-    <Interface>
-      <InterfaceName>System.Runtime.Serialization.ISerializable</InterfaceName>
-    </Interface>
-  </Interfaces>
-  <Attributes>
-    <Attribute>
-      <AttributeName>System.Runtime.CompilerServices.TypeForwardedFrom("System.Core, Version=3.5.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")</AttributeName>
-    </Attribute>
-  </Attributes>
-  <Docs>
-    <summary>To be added.</summary>
-    <remarks>To be added.</remarks>
-  </Docs>
-  <Members>
-    <Member MemberName="BaseUtcOffset">
-      <MemberSignature Language="C#" Value="public TimeSpan BaseUtcOffset { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeSpan</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="ClearCachedData">
-      <MemberSignature Language="C#" Value="public static void ClearCachedData ();" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Void</ReturnType>
-      </ReturnValue>
-      <Parameters />
-      <Docs>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="ConvertTime">
-      <MemberSignature Language="C#" Value="public static DateTime ConvertTime (DateTime dateTime, TimeZoneInfo destinationTimeZone);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.DateTime</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTime" Type="System.DateTime" />
-        <Parameter Name="destinationTimeZone" Type="System.TimeZoneInfo" />
-      </Parameters>
-      <Docs>
-        <param name="dateTime">To be added.</param>
-        <param name="destinationTimeZone">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="ConvertTime">
-      <MemberSignature Language="C#" Value="public static DateTimeOffset ConvertTime (DateTimeOffset dateTimeOffset, TimeZoneInfo destinationTimeZone);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.DateTimeOffset</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTimeOffset" Type="System.DateTimeOffset" />
-        <Parameter Name="destinationTimeZone" Type="System.TimeZoneInfo" />
-      </Parameters>
-      <Docs>
-        <param name="dateTimeOffset">To be added.</param>
-        <param name="destinationTimeZone">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="ConvertTime">
-      <MemberSignature Language="C#" Value="public static DateTime ConvertTime (DateTime dateTime, TimeZoneInfo sourceTimeZone, TimeZoneInfo destinationTimeZone);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.DateTime</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTime" Type="System.DateTime" />
-        <Parameter Name="sourceTimeZone" Type="System.TimeZoneInfo" />
-        <Parameter Name="destinationTimeZone" Type="System.TimeZoneInfo" />
-      </Parameters>
-      <Docs>
-        <param name="dateTime">To be added.</param>
-        <param name="sourceTimeZone">To be added.</param>
-        <param name="destinationTimeZone">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="ConvertTimeBySystemTimeZoneId">
-      <MemberSignature Language="C#" Value="public static DateTime ConvertTimeBySystemTimeZoneId (DateTime dateTime, string destinationTimeZoneId);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.DateTime</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTime" Type="System.DateTime" />
-        <Parameter Name="destinationTimeZoneId" Type="System.String" />
-      </Parameters>
-      <Docs>
-        <param name="dateTime">To be added.</param>
-        <param name="destinationTimeZoneId">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="ConvertTimeBySystemTimeZoneId">
-      <MemberSignature Language="C#" Value="public static DateTimeOffset ConvertTimeBySystemTimeZoneId (DateTimeOffset dateTimeOffset, string destinationTimeZoneId);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.DateTimeOffset</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTimeOffset" Type="System.DateTimeOffset" />
-        <Parameter Name="destinationTimeZoneId" Type="System.String" />
-      </Parameters>
-      <Docs>
-        <param name="dateTimeOffset">To be added.</param>
-        <param name="destinationTimeZoneId">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="ConvertTimeBySystemTimeZoneId">
-      <MemberSignature Language="C#" Value="public static DateTime ConvertTimeBySystemTimeZoneId (DateTime dateTime, string sourceTimeZoneId, string destinationTimeZoneId);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.DateTime</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTime" Type="System.DateTime" />
-        <Parameter Name="sourceTimeZoneId" Type="System.String" />
-        <Parameter Name="destinationTimeZoneId" Type="System.String" />
-      </Parameters>
-      <Docs>
-        <param name="dateTime">To be added.</param>
-        <param name="sourceTimeZoneId">To be added.</param>
-        <param name="destinationTimeZoneId">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="ConvertTimeFromUtc">
-      <MemberSignature Language="C#" Value="public static DateTime ConvertTimeFromUtc (DateTime dateTime, TimeZoneInfo destinationTimeZone);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.DateTime</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTime" Type="System.DateTime" />
-        <Parameter Name="destinationTimeZone" Type="System.TimeZoneInfo" />
-      </Parameters>
-      <Docs>
-        <param name="dateTime">To be added.</param>
-        <param name="destinationTimeZone">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="ConvertTimeToUtc">
-      <MemberSignature Language="C#" Value="public static DateTime ConvertTimeToUtc (DateTime dateTime);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.DateTime</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTime" Type="System.DateTime" />
-      </Parameters>
-      <Docs>
-        <param name="dateTime">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="ConvertTimeToUtc">
-      <MemberSignature Language="C#" Value="public static DateTime ConvertTimeToUtc (DateTime dateTime, TimeZoneInfo sourceTimeZone);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.DateTime</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTime" Type="System.DateTime" />
-        <Parameter Name="sourceTimeZone" Type="System.TimeZoneInfo" />
-      </Parameters>
-      <Docs>
-        <param name="dateTime">To be added.</param>
-        <param name="sourceTimeZone">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="CreateCustomTimeZone">
-      <MemberSignature Language="C#" Value="public static TimeZoneInfo CreateCustomTimeZone (string id, TimeSpan baseUtcOffset, string displayName, string standardDisplayName);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeZoneInfo</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="id" Type="System.String" />
-        <Parameter Name="baseUtcOffset" Type="System.TimeSpan" />
-        <Parameter Name="displayName" Type="System.String" />
-        <Parameter Name="standardDisplayName" Type="System.String" />
-      </Parameters>
-      <Docs>
-        <param name="id">To be added.</param>
-        <param name="baseUtcOffset">To be added.</param>
-        <param name="displayName">To be added.</param>
-        <param name="standardDisplayName">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="CreateCustomTimeZone">
-      <MemberSignature Language="C#" Value="public static TimeZoneInfo CreateCustomTimeZone (string id, TimeSpan baseUtcOffset, string displayName, string standardDisplayName, string daylightDisplayName, TimeZoneInfo.AdjustmentRule[] adjustmentRules);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeZoneInfo</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="id" Type="System.String" />
-        <Parameter Name="baseUtcOffset" Type="System.TimeSpan" />
-        <Parameter Name="displayName" Type="System.String" />
-        <Parameter Name="standardDisplayName" Type="System.String" />
-        <Parameter Name="daylightDisplayName" Type="System.String" />
-        <Parameter Name="adjustmentRules" Type="System.TimeZoneInfo+AdjustmentRule[]" />
-      </Parameters>
-      <Docs>
-        <param name="id">To be added.</param>
-        <param name="baseUtcOffset">To be added.</param>
-        <param name="displayName">To be added.</param>
-        <param name="standardDisplayName">To be added.</param>
-        <param name="daylightDisplayName">To be added.</param>
-        <param name="adjustmentRules">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="CreateCustomTimeZone">
-      <MemberSignature Language="C#" Value="public static TimeZoneInfo CreateCustomTimeZone (string id, TimeSpan baseUtcOffset, string displayName, string standardDisplayName, string daylightDisplayName, TimeZoneInfo.AdjustmentRule[] adjustmentRules, bool disableDaylightSavingTime);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeZoneInfo</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="id" Type="System.String" />
-        <Parameter Name="baseUtcOffset" Type="System.TimeSpan" />
-        <Parameter Name="displayName" Type="System.String" />
-        <Parameter Name="standardDisplayName" Type="System.String" />
-        <Parameter Name="daylightDisplayName" Type="System.String" />
-        <Parameter Name="adjustmentRules" Type="System.TimeZoneInfo+AdjustmentRule[]" />
-        <Parameter Name="disableDaylightSavingTime" Type="System.Boolean" />
-      </Parameters>
-      <Docs>
-        <param name="id">To be added.</param>
-        <param name="baseUtcOffset">To be added.</param>
-        <param name="displayName">To be added.</param>
-        <param name="standardDisplayName">To be added.</param>
-        <param name="daylightDisplayName">To be added.</param>
-        <param name="adjustmentRules">To be added.</param>
-        <param name="disableDaylightSavingTime">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="DaylightName">
-      <MemberSignature Language="C#" Value="public string DaylightName { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.String</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="DisplayName">
-      <MemberSignature Language="C#" Value="public string DisplayName { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.String</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="Equals">
-      <MemberSignature Language="C#" Value="public bool Equals (TimeZoneInfo other);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Boolean</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="other" Type="System.TimeZoneInfo" />
-      </Parameters>
-      <Docs>
-        <param name="other">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="FindSystemTimeZoneById">
-      <MemberSignature Language="C#" Value="public static TimeZoneInfo FindSystemTimeZoneById (string id);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeZoneInfo</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="id" Type="System.String" />
-      </Parameters>
-      <Docs>
-        <param name="id">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="FromSerializedString">
-      <MemberSignature Language="C#" Value="public static TimeZoneInfo FromSerializedString (string source);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeZoneInfo</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="source" Type="System.String" />
-      </Parameters>
-      <Docs>
-        <param name="source">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="GetAdjustmentRules">
-      <MemberSignature Language="C#" Value="public TimeZoneInfo.AdjustmentRule[] GetAdjustmentRules ();" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeZoneInfo+AdjustmentRule[]</ReturnType>
-      </ReturnValue>
-      <Parameters />
-      <Docs>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="GetAmbiguousTimeOffsets">
-      <MemberSignature Language="C#" Value="public TimeSpan[] GetAmbiguousTimeOffsets (DateTime dateTime);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeSpan[]</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTime" Type="System.DateTime" />
-      </Parameters>
-      <Docs>
-        <param name="dateTime">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="GetAmbiguousTimeOffsets">
-      <MemberSignature Language="C#" Value="public TimeSpan[] GetAmbiguousTimeOffsets (DateTimeOffset dateTimeOffset);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeSpan[]</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTimeOffset" Type="System.DateTimeOffset" />
-      </Parameters>
-      <Docs>
-        <param name="dateTimeOffset">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="GetHashCode">
-      <MemberSignature Language="C#" Value="public override int GetHashCode ();" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Int32</ReturnType>
-      </ReturnValue>
-      <Parameters />
-      <Docs>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="GetSystemTimeZones">
-      <MemberSignature Language="C#" Value="public static System.Collections.ObjectModel.ReadOnlyCollection&lt;TimeZoneInfo&gt; GetSystemTimeZones ();" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Collections.ObjectModel.ReadOnlyCollection&lt;System.TimeZoneInfo&gt;</ReturnType>
-      </ReturnValue>
-      <Parameters />
-      <Docs>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="GetUtcOffset">
-      <MemberSignature Language="C#" Value="public TimeSpan GetUtcOffset (DateTime dateTime);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeSpan</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTime" Type="System.DateTime" />
-      </Parameters>
-      <Docs>
-        <param name="dateTime">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="GetUtcOffset">
-      <MemberSignature Language="C#" Value="public TimeSpan GetUtcOffset (DateTimeOffset dateTimeOffset);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeSpan</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTimeOffset" Type="System.DateTimeOffset" />
-      </Parameters>
-      <Docs>
-        <param name="dateTimeOffset">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="HasSameRules">
-      <MemberSignature Language="C#" Value="public bool HasSameRules (TimeZoneInfo other);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Boolean</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="other" Type="System.TimeZoneInfo" />
-      </Parameters>
-      <Docs>
-        <param name="other">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="Id">
-      <MemberSignature Language="C#" Value="public string Id { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.String</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="IsAmbiguousTime">
-      <MemberSignature Language="C#" Value="public bool IsAmbiguousTime (DateTime dateTime);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Boolean</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTime" Type="System.DateTime" />
-      </Parameters>
-      <Docs>
-        <param name="dateTime">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="IsAmbiguousTime">
-      <MemberSignature Language="C#" Value="public bool IsAmbiguousTime (DateTimeOffset dateTimeOffset);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Boolean</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTimeOffset" Type="System.DateTimeOffset" />
-      </Parameters>
-      <Docs>
-        <param name="dateTimeOffset">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="IsDaylightSavingTime">
-      <MemberSignature Language="C#" Value="public bool IsDaylightSavingTime (DateTime dateTime);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Boolean</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTime" Type="System.DateTime" />
-      </Parameters>
-      <Docs>
-        <param name="dateTime">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="IsDaylightSavingTime">
-      <MemberSignature Language="C#" Value="public bool IsDaylightSavingTime (DateTimeOffset dateTimeOffset);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Boolean</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTimeOffset" Type="System.DateTimeOffset" />
-      </Parameters>
-      <Docs>
-        <param name="dateTimeOffset">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="IsInvalidTime">
-      <MemberSignature Language="C#" Value="public bool IsInvalidTime (DateTime dateTime);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Boolean</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="dateTime" Type="System.DateTime" />
-      </Parameters>
-      <Docs>
-        <param name="dateTime">To be added.</param>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="Local">
-      <MemberSignature Language="C#" Value="public static TimeZoneInfo Local { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeZoneInfo</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="StandardName">
-      <MemberSignature Language="C#" Value="public string StandardName { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.String</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="SupportsDaylightSavingTime">
-      <MemberSignature Language="C#" Value="public bool SupportsDaylightSavingTime { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Boolean</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="System.Runtime.Serialization.IDeserializationCallback.OnDeserialization">
-      <MemberSignature Language="C#" Value="void IDeserializationCallback.OnDeserialization (object sender);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Void</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="sender" Type="System.Object" />
-      </Parameters>
-      <Docs>
-        <param name="sender">To be added.</param>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="System.Runtime.Serialization.ISerializable.GetObjectData">
-      <MemberSignature Language="C#" Value="void ISerializable.GetObjectData (System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context);" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.Void</ReturnType>
-      </ReturnValue>
-      <Parameters>
-        <Parameter Name="info" Type="System.Runtime.Serialization.SerializationInfo" />
-        <Parameter Name="context" Type="System.Runtime.Serialization.StreamingContext" />
-      </Parameters>
-      <Docs>
-        <param name="info">To be added.</param>
-        <param name="context">To be added.</param>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="ToSerializedString">
-      <MemberSignature Language="C#" Value="public string ToSerializedString ();" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.String</ReturnType>
-      </ReturnValue>
-      <Parameters />
-      <Docs>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="ToString">
-      <MemberSignature Language="C#" Value="public override string ToString ();" />
-      <MemberType>Method</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.String</ReturnType>
-      </ReturnValue>
-      <Parameters />
-      <Docs>
-        <summary>To be added.</summary>
-        <returns>To be added.</returns>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName="Utc">
-      <MemberSignature Language="C#" Value="public static TimeZoneInfo Utc { get; }" />
-      <MemberType>Property</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <ReturnValue>
-        <ReturnType>System.TimeZoneInfo</ReturnType>
-      </ReturnValue>
-      <Docs>
-        <summary>To be added.</summary>
-        <value>To be added.</value>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-  </Members>
-</Type>
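The TimeZoneInfo stubs deleted here cover the standard conversion API. A minimal sketch of typical usage, assuming only the public .NET surface; the zone id is illustrative and system-dependent:

    using System;

    class TimeZoneInfoSketch
    {
        static void Main ()
        {
            // Olson-style ids resolve on Unix/Mono; Windows uses names
            // like "Eastern Standard Time".
            TimeZoneInfo zone = TimeZoneInfo.FindSystemTimeZoneById ("America/New_York");

            DateTime utc = DateTime.UtcNow;
            DateTime local = TimeZoneInfo.ConvertTimeFromUtc (utc, zone);

            string name = zone.IsDaylightSavingTime (local)
                ? zone.DaylightName : zone.StandardName;
            Console.WriteLine ("{0} UTC -> {1} ({2})", utc, local, name);
        }
    }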
diff --git a/mcs/class/corlib/Documentation/en/System/TimeZoneNotFoundException.xml b/mcs/class/corlib/Documentation/en/System/TimeZoneNotFoundException.xml
deleted file mode 100644 (file)
index 975fe7f..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-<Type Name="TimeZoneNotFoundException" FullName="System.TimeZoneNotFoundException">
-  <TypeSignature Language="C#" Value="public class TimeZoneNotFoundException : Exception" />
-  <AssemblyInfo>
-    <AssemblyName>mscorlib</AssemblyName>
-    <AssemblyVersion>4.0.0.0</AssemblyVersion>
-  </AssemblyInfo>
-  <Base>
-    <BaseTypeName>System.Exception</BaseTypeName>
-  </Base>
-  <Interfaces />
-  <Attributes>
-    <Attribute>
-      <AttributeName>System.Runtime.CompilerServices.TypeForwardedFrom("System.Core, Version=3.5.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")</AttributeName>
-    </Attribute>
-  </Attributes>
-  <Docs>
-    <summary>To be added.</summary>
-    <remarks>To be added.</remarks>
-  </Docs>
-  <Members>
-    <Member MemberName=".ctor">
-      <MemberSignature Language="C#" Value="public TimeZoneNotFoundException ();" />
-      <MemberType>Constructor</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <Parameters />
-      <Docs>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName=".ctor">
-      <MemberSignature Language="C#" Value="public TimeZoneNotFoundException (string message);" />
-      <MemberType>Constructor</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <Parameters>
-        <Parameter Name="message" Type="System.String" />
-      </Parameters>
-      <Docs>
-        <param name="message">To be added.</param>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName=".ctor">
-      <MemberSignature Language="C#" Value="protected TimeZoneNotFoundException (System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext sc);" />
-      <MemberType>Constructor</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <Parameters>
-        <Parameter Name="info" Type="System.Runtime.Serialization.SerializationInfo" />
-        <Parameter Name="sc" Type="System.Runtime.Serialization.StreamingContext" />
-      </Parameters>
-      <Docs>
-        <param name="info">To be added.</param>
-        <param name="sc">To be added.</param>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-    <Member MemberName=".ctor">
-      <MemberSignature Language="C#" Value="public TimeZoneNotFoundException (string message, Exception e);" />
-      <MemberType>Constructor</MemberType>
-      <AssemblyInfo>
-        <AssemblyVersion>4.0.0.0</AssemblyVersion>
-      </AssemblyInfo>
-      <Parameters>
-        <Parameter Name="message" Type="System.String" />
-        <Parameter Name="e" Type="System.Exception" />
-      </Parameters>
-      <Docs>
-        <param name="message">To be added.</param>
-        <param name="e">To be added.</param>
-        <summary>To be added.</summary>
-        <remarks>To be added.</remarks>
-      </Docs>
-    </Member>
-  </Members>
-</Type>
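TimeZoneNotFoundException is what TimeZoneInfo.FindSystemTimeZoneById throws when an id is unknown to the system. A small sketch of a lookup with a fallback; the helper name is hypothetical:

    using System;

    static class TimeZones
    {
        // Hypothetical helper: fall back to UTC when the id cannot be resolved.
        public static TimeZoneInfo FindOrUtc (string id)
        {
            try {
                return TimeZoneInfo.FindSystemTimeZoneById (id);
            } catch (TimeZoneNotFoundException) {
                return TimeZoneInfo.Utc; // id not in the system database
            } catch (InvalidTimeZoneException) {
                return TimeZoneInfo.Utc; // id found but its data is corrupt
            }
        }
    }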
diff --git a/mcs/tools/mdoc/Mono.Documentation/monodocer.cs b/mcs/tools/mdoc/Mono.Documentation/monodocer.cs
index a930951268c6bac376249bb4986e525da9c8a1f1..dcb23d2f17c9890b7477423b5c0f73e9717f0598 100644 (file)
@@ -60,6 +60,8 @@ class MDocUpdater : MDocCommand
 
        MyXmlNodeList extensionMethods = new MyXmlNodeList ();
 
+       HashSet<string> forwardedTypes = new HashSet<string> ();
+
        public override void Run (IEnumerable<string> args)
        {
                show_exceptions = DebugOutput;
@@ -142,6 +144,9 @@ class MDocUpdater : MDocCommand
                
                this.assemblies = assemblies.Select (a => LoadAssembly (a)).ToList ();
 
+               // Store types that have been forwarded to avoid duplicate generation
+               GatherForwardedTypes ();
+
                docEnum = docEnum ?? new DocumentationEnumerator ();
                
                // PERFORM THE UPDATES
@@ -188,6 +193,13 @@ class MDocUpdater : MDocCommand
                }
        }
 
+       void GatherForwardedTypes ()
+       {
+               foreach (var asm in assemblies)
+                       foreach (var type in asm.MainModule.ExportedTypes.Where (t => t.IsForwarder).Select (t => t.FullName))
+                               forwardedTypes.Add (type);
+       }
+
        static ExceptionLocations ParseExceptionLocations (string s)
        {
                ExceptionLocations loc = ExceptionLocations.Member;
@@ -641,7 +653,7 @@ class MDocUpdater : MDocCommand
        {
                foreach (TypeDefinition type in docEnum.GetDocumentationTypes (assembly, null)) {
                        string typename = GetTypeFileName(type);
-                       if (!IsPublic (type) || typename.IndexOfAny (InvalidFilenameChars) >= 0)
+                       if (!IsPublic (type) || typename.IndexOfAny (InvalidFilenameChars) >= 0 || forwardedTypes.Contains (type.FullName))
                                continue;
 
                        string reltypepath = DoUpdateType (type, source, dest);
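The hunks above make mdoc skip types that are only forwarders in the scanned assembly, which is why the forwarded TimeZoneInfo family of docs is deleted in this commit. A standalone sketch of the same Mono.Cecil query, for illustration; the assembly path is an example:

    using System;
    using System.Linq;
    using Mono.Cecil;

    class ListForwarders
    {
        static void Main ()
        {
            // Forwarders live in the exporting assembly's ExportedTypes table.
            var asm = AssemblyDefinition.ReadAssembly ("System.Core.dll");
            foreach (var name in asm.MainModule.ExportedTypes
                         .Where (t => t.IsForwarder)
                         .Select (t => t.FullName))
                Console.WriteLine (name); // e.g. System.TimeZoneInfo
        }
    }

Collecting the forwarded names once up front lets the update loop reject a forwarded type with a single set lookup.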
diff --git a/mcs/tools/monkeydoc/Assembly/AssemblyInfo.cs b/mcs/tools/monkeydoc/Assembly/AssemblyInfo.cs
new file mode 100644 (file)
index 0000000..61c5fdc
--- /dev/null
@@ -0,0 +1,6 @@
+using System.Reflection;
+using System.Runtime.CompilerServices;
+
+[assembly:AssemblyVersion("1.0.0.0")]
+[assembly:AssemblyDelaySign(false)]
+[assembly:AssemblyKeyFile("../../class/mono.snk")]
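The new AssemblyInfo.cs only pins the version and the signing key. For illustration, the declared version is what reflection reports at runtime; a sketch, assuming the snippet is compiled into the assembly that carries these attributes:

    using System;
    using System.Reflection;

    class VersionSketch
    {
        static void Main ()
        {
            // Prints "1.0.0.0", the AssemblyVersion declared in AssemblyInfo.cs.
            Console.WriteLine (Assembly.GetExecutingAssembly ().GetName ().Version);
        }
    }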
diff --git a/mcs/tools/monkeydoc/Lucene.Net/.gitattributes b/mcs/tools/monkeydoc/Lucene.Net/.gitattributes
new file mode 100644 (file)
index 0000000..f4472a3
--- /dev/null
@@ -0,0 +1,5 @@
+/ABOUT.txt -crlf
+/BUILD.txt -crlf
+/CHANGES.txt -crlf
+/HISTORY.txt -crlf
+/LICENSE.txt -crlf
diff --git a/mcs/tools/monkeydoc/Lucene.Net/ABOUT.txt b/mcs/tools/monkeydoc/Lucene.Net/ABOUT.txt
new file mode 100644 (file)
index 0000000..1f77b7f
--- /dev/null
@@ -0,0 +1 @@
+Apache Lucene.Net is a port of Jakarta Lucene to C#.  The ports from Java to C# of versions 1.4.0, 1.4.3, 1.9, 1.9.1, 2.0 and 2.1 were done primarily by George Aroush.  To contact George Aroush please visit http://www.aroush.net/\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/BUILD.txt b/mcs/tools/monkeydoc/Lucene.Net/BUILD.txt
new file mode 100644 (file)
index 0000000..8563639
--- /dev/null
@@ -0,0 +1,23 @@
+Apache Lucene.Net Build Instructions\r
+\r
+Basic steps:\r
+  0) Install Visual Studio.NET 2005 (or greater)\r
+  1) Download Apache Lucene.Net from Apache and unpack it\r
+  2) Navigate to the sub-folder for the sub-projects either in src \r
+     or contrib\r
+  3) Using Visual Studio.NET 2005's Open Project menu option, \r
+     select the project you want to build.\r
+  4) Build the project from the Build menu\r
+\r
+For further information on Lucene, go to:\r
+  http://lucene.apache.org/lucene.net/\r
+\r
+Please join the Lucene-User mailing list by visiting this site:\r
+  http://lucene.apache.org/lucene.net/\r
+\r
+Please post suggestions, questions, corrections or additions to this\r
+document to the lucene-net-user mailing list.\r
+\r
+This file was originally written by George Aroush <george@aroush.net>.\r
+\r
+Copyright (c) 2006 - 2009 The Apache Software Foundation.  All rights reserved.\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/CHANGES.txt b/mcs/tools/monkeydoc/Lucene.Net/CHANGES.txt
new file mode 100644 (file)
index 0000000..82bb4d9
--- /dev/null
@@ -0,0 +1,3961 @@
+\r
+=================== 2.9.4 trunk (not yet released) =====================\r
+\r
+Bug fixes\r
+\r
+ * LUCENENET-355  [LUCENE-2387]: Don't hang onto Fieldables from the last doc indexed,\r
+   in IndexWriter, nor the Reader in Tokenizer after close is\r
+   called. (digy) [Ruben Laguna, Uwe Schindler, Mike McCandless]\r
+\r
+\r
+Change Log Copied from Lucene \r
+======================= Release 2.9.2 2010-02-26 =======================\r
+\r
+Bug fixes\r
+\r
+ * LUCENE-2045: Fix silly FileNotFoundException hit if you enable\r
+   infoStream on IndexWriter and then add an empty document and commit\r
+   (Shai Erera via Mike McCandless)\r
+\r
+ * LUCENE-2088: addAttribute() should only accept interfaces that\r
+   extend Attribute. (Shai Erera, Uwe Schindler)\r
+\r
+ * LUCENE-2092: BooleanQuery was ignoring disableCoord in its hashCode\r
+   and equals methods, causing bad things to happen when caching\r
+   BooleanQueries.  (Chris Hostetter, Mike McCandless)\r
+\r
+ * LUCENE-2095: Fixes: when two threads call IndexWriter.commit() at\r
+   the same time, it's possible for commit to return control back to\r
+   one of the threads before all changes are actually committed.\r
+   (Sanne Grinovero via Mike McCandless)\r
+\r
+ * LUCENE-2166: Don't incorrectly keep warning about the same immense\r
+    term, when IndexWriter.infoStream is on.  (Mike McCandless)\r
+\r
+ * LUCENE-2158: At high indexing rates, NRT reader could temporarily\r
+   lose deletions.  (Mike McCandless)\r
+  \r
+ * LUCENE-2182: DEFAULT_ATTRIBUTE_FACTORY was failing to load\r
+   implementation class when interface was loaded by a different\r
+   class loader.  (Uwe Schindler, reported on java-user by Ahmed El-dawy)\r
+  \r
+ * LUCENE-2257: Increase max number of unique terms in one segment to\r
+   termIndexInterval (default 128) * ~2.1 billion = ~274 billion.\r
+   (Tom Burton-West via Mike McCandless)\r
+\r
+ * LUCENE-2260: Fixed AttributeSource to not hold a strong\r
+   reference to the Attribute/AttributeImpl classes which prevents\r
+   unloading of custom attributes loaded by other classloaders\r
+   (e.g. in Solr plugins).  (Uwe Schindler)\r
+\r
+ * LUCENE-1941: Fix Min/MaxPayloadFunction returns 0 when\r
+   only one payload is present.  (Erik Hatcher, Mike McCandless\r
+   via Uwe Schindler)\r
+\r
+ * LUCENE-2270: Queries consisting of all zero-boost clauses\r
+   (for example, text:foo^0) sorted incorrectly and produced\r
+   invalid docids. (yonik)\r
+\r
+ * LUCENE-2422: Don't reuse byte[] in IndexInput/Output -- it gains\r
+   little performance, and ties up possibly large amounts of memory\r
+   for apps that index large docs.  (Ross Woolf via Mike McCandless)\r
+\r
+API Changes\r
+\r
+ * LUCENE-2190: Added a new class CustomScoreProvider to function package\r
+   that can be subclassed to provide custom scoring to CustomScoreQuery.\r
+   The methods in CustomScoreQuery that did this before were deprecated\r
+   and replaced by a method getCustomScoreProvider(IndexReader) that\r
+   returns a custom score implementation using the above class. The change\r
+   is necessary with per-segment searching, as CustomScoreQuery is\r
+   a stateless class (like all other Queries) and does not know about\r
+   the currently searched segment. This API works similar to Filter's\r
+   getDocIdSet(IndexReader).  (Paul chez Jamespot via Mike McCandless,\r
+   Uwe Schindler)\r
+\r
+ * LUCENE-2080: Deprecate Version.LUCENE_CURRENT, as using this constant\r
+   will cause backwards compatibility problems when upgrading Lucene. See\r
+   the Version javadocs for additional information.\r
+   (Robert Muir)\r
+\r
+Optimizations\r
+\r
+ * LUCENE-2086: When resolving deleted terms, do so in term sort order\r
+   for better performance (Bogdan Ghidireac via Mike McCandless)\r
+\r
+ * LUCENE-2258: Remove unneeded synchronization in FuzzyTermEnum.\r
+   (Uwe Schindler, Robert Muir)\r
+\r
+Test Cases\r
+\r
+ * LUCENE-2114: Change TestFilteredSearch to test on multi-segment\r
+   index as well. (Simon Willnauer via Mike McCandless)\r
+\r
+ * LUCENE-2211: Improves BaseTokenStreamTestCase to use a fake attribute\r
+   that checks if clearAttributes() was called correctly.\r
+   (Uwe Schindler, Robert Muir)\r
+\r
+ * LUCENE-2207, LUCENE-2219: Improve BaseTokenStreamTestCase to check if\r
+   end() is implemented correctly.  (Koji Sekiguchi, Robert Muir)\r
+\r
+Documentation\r
+\r
+ * LUCENE-2114: Improve javadocs of Filter to call out that the\r
+   provided reader is per-segment (Simon Willnauer via Mike\r
+   McCandless)\r
+\r
+======================= Release 2.9.1 2009-11-06 =======================\r
+\r
+Changes in backwards compatibility policy\r
+\r
+ * LUCENE-2002: Add required Version matchVersion argument when\r
+   constructing QueryParser or MultiFieldQueryParser and default (as\r
+   of 2.9) enablePositionIncrements to true to match\r
+   StandardAnalyzer's 2.9 default (Uwe Schindler, Mike McCandless)\r
+\r
+Bug fixes\r
+\r
+ * LUCENE-1974: Fixed nasty bug in BooleanQuery (when it used\r
+   BooleanScorer for scoring), whereby some matching documents fail to\r
+   be collected.  (Fulin Tang via Mike McCandless)\r
+\r
+ * LUCENE-1124: Make sure FuzzyQuery always matches the precise term.\r
+   (stefatwork@gmail.com via Mike McCandless)\r
+\r
+ * LUCENE-1976: Fix IndexReader.isCurrent() to return the right thing\r
+   when the reader is a near real-time reader.  (Jake Mannix via Mike\r
+   McCandless)\r
+\r
+ * LUCENE-1986: Fix NPE when scoring PayloadNearQuery (Peter Keegan,\r
+   Mark Miller via Mike McCandless)\r
+\r
+ * LUCENE-1992: Fix thread hazard if a merge is committing just as an\r
+   exception occurs during sync (Uwe Schindler, Mike McCandless)\r
+\r
+ * LUCENE-1995: Note in javadocs that IndexWriter.setRAMBufferSizeMB\r
+   cannot exceed 2048 MB, and throw IllegalArgumentException if it\r
+   does.  (Aaron McKee, Yonik Seeley, Mike McCandless)\r
+\r
+ * LUCENE-2004: Fix Constants.LUCENE_MAIN_VERSION to not be inlined\r
+   by client code.  (Uwe Schindler)\r
+\r
+ * LUCENE-2016: Replace illegal U+FFFF character with the replacement\r
+   char (U+FFFD) during indexing, to prevent silent index corruption.\r
+   (Peter Keegan, Mike McCandless)\r
+\r
+API Changes\r
+\r
+ * Un-deprecate search(Weight weight, Filter filter, int n) from\r
+   Searchable interface (deprecated by accident).  (Uwe Schindler)\r
+\r
+ * Un-deprecate o.a.l.util.Version constants.  (Mike McCandless)\r
+\r
+ * LUCENE-1987: Un-deprecate some ctors of Token, as they will not\r
+   be removed in 3.0 and are still useful. Also add some missing\r
+   o.a.l.util.Version constants for enabling invalid acronym\r
+   settings in StandardAnalyzer to be compatible with the coming\r
+   Lucene 3.0.  (Uwe Schindler)\r
+\r
+ * LUCENE-1973: Un-deprecate IndexSearcher.setDefaultFieldSortScoring,\r
+   to allow controlling per-IndexSearcher whether scores are computed\r
+   when sorting by field.  (Uwe Schindler, Mike McCandless)\r
+   \r
+Documentation\r
+\r
+ * LUCENE-1955: Fix Hits deprecation notice to point users in right\r
+   direction. (Mike McCandless, Mark Miller)\r
+   \r
+ * Fix javadoc about score tracking done by search methods in Searcher \r
+   and IndexSearcher.  (Mike McCandless)\r
+\r
+ * LUCENE-2008: Javadoc improvements for TokenStream/Tokenizer/Token\r
+   (Luke Nezda via Mike McCandless)\r
+\r
+======================= Release 2.9.0 2009-09-23 =======================\r
+\r
+Changes in backwards compatibility policy\r
+\r
+ * LUCENE-1575: Searchable.search(Weight, Filter, int, Sort) no\r
+    longer computes a document score for each hit by default.  If\r
+    document score tracking is still needed, you can call\r
+    IndexSearcher.setDefaultFieldSortScoring(true, true) to enable\r
+    both per-hit and maxScore tracking; however, this is deprecated\r
+    and will be removed in 3.0.\r
+\r
+    Alternatively, use Searchable.search(Weight, Filter, Collector)\r
+    and pass in a TopFieldCollector instance, using the following code\r
+    sample:\r
+\r
+    <code>\r
+      TopFieldCollector tfc = TopFieldCollector.create(sort, numHits, fillFields, \r
+                                                       true /* trackDocScores */,\r
+                                                       true /* trackMaxScore */,\r
+                                                       false /* docsInOrder */);\r
+      searcher.search(query, tfc);\r
+      TopDocs results = tfc.topDocs();\r
+    </code>\r
+\r
+    Note that your Sort object cannot use SortField.AUTO when you\r
+    directly instantiate TopFieldCollector.\r
+\r
+    Also, the method search(Weight, Filter, Collector) was added to\r
+    the Searchable interface and the Searcher abstract class to\r
+    replace the deprecated HitCollector versions.  If you either\r
+    implement Searchable or extend Searcher, you should change your\r
+    code to implement this method.  If you already extend\r
+    IndexSearcher, no further changes are needed to use Collector.\r
+    \r
+    Finally, the values Float.NaN and Float.NEGATIVE_INFINITY are not\r
+    valid scores.  Lucene uses these values internally in certain\r
+    places, so if you have hits with such scores, it will cause\r
+    problems. (Shai Erera via Mike McCandless)\r
+\r
+ * LUCENE-1687: All methods and parsers from the interface ExtendedFieldCache\r
+    have been moved into FieldCache. ExtendedFieldCache is now deprecated and\r
+    contains only a few declarations for binary backwards compatibility. \r
+    ExtendedFieldCache will be removed in version 3.0. Users of FieldCache and \r
+    ExtendedFieldCache will be able to plug in Lucene 2.9 without recompilation.\r
+    The auto cache (FieldCache.getAuto) is now deprecated. Due to the merge of\r
+    ExtendedFieldCache and FieldCache, FieldCache can now additionally return\r
+    long[] and double[] arrays in addition to int[] and float[] and StringIndex.\r
+    \r
+    The interface changes are only notable for users implementing the interfaces,\r
+    which few users are likely to have done, because there is no way to change\r
+    Lucene's FieldCache implementation.  (Grant Ingersoll, Uwe Schindler)\r
+    \r
+ * LUCENE-1630, LUCENE-1771: Weight, previously an interface, is now an abstract \r
+    class. Some of the method signatures have changed, but it should be fairly\r
+    easy to see what adjustments must be made to existing code to sync up\r
+    with the new API. You can find more detail in the API Changes section.\r
+    \r
+    Going forward Searchable will be kept for convenience only and may\r
+    be changed between minor releases without any deprecation\r
+    process. It is not recommended that you implement it, but rather extend\r
+    Searcher.  \r
+    (Shai Erera, Chris Hostetter, Martin Ruckli, Mark Miller via Mike McCandless)\r
+\r
+ * LUCENE-1422, LUCENE-1693: The new Attribute based TokenStream API (see below)\r
+    has some backwards breaks in rare cases. We did our best to make the \r
+    transition as easy as possible and you are not likely to run into any problems. \r
+    If your tokenizers still implement next(Token) or next(), the calls are \r
+    automatically wrapped. The indexer and query parser use the new API \r
+    (e.g. they use incrementToken() calls). All core TokenStreams are implemented using\r
+    the new API. You can mix old and new API style TokenFilters/TokenStream. \r
+    Problems only occur when you have done the following:\r
+    You have overridden next(Token) or next() in one of the non-abstract core\r
+    TokenStreams/-Filters. These classes should normally be final, but some\r
+    of them are not. In this case, next(Token)/next() would never be called.\r
+    To fail early with a hard compile/runtime error, the next(Token)/next()\r
+    methods in these TokenStreams/-Filters were made final in this release.\r
+    (Michael Busch, Uwe Schindler)\r
+\r
+ * LUCENE-1763: MergePolicy now requires an IndexWriter instance to\r
+    be passed upon instantiation. As a result, IndexWriter was removed\r
+    as a method argument from all MergePolicy methods. (Shai Erera via\r
+    Mike McCandless)\r
+    \r
+ * LUCENE-1748: LUCENE-1001 introduced PayloadSpans, but this was a back\r
+    compat break and caused custom SpanQuery implementations to fail at runtime\r
+    in a variety of ways. This issue attempts to remedy things by causing\r
+    a compile time break on custom SpanQuery implementations and removing \r
+    the PayloadSpans class, with its functionality now moved to Spans. To\r
+    help in alleviating future back compat pain, Spans has been changed from\r
+    an interface to an abstract class.\r
+    (Hugh Cayless, Mark Miller)\r
+    \r
+ * LUCENE-1808: Query.createWeight has been changed from protected to\r
+    public. This will be a back compat break if you have overridden this\r
+    method - but you are likely already affected by the LUCENE-1693 (make Weight \r
+    abstract rather than an interface) back compat break if you have overridden \r
+    Query.createWeight, so we have taken the opportunity to make this change.\r
+    (Tim Smith, Shai Erera via Mark Miller)\r
+\r
+ * LUCENE-1708 - IndexReader.document() no longer checks if the document is \r
+    deleted. You can call IndexReader.isDeleted(n) prior to calling document(n).\r
+    (Shai Erera via Mike McCandless)\r
+\r
+\r
+Changes in runtime behavior\r
+\r
+ * LUCENE-1424: QueryParser now by default uses constant score auto\r
+    rewriting when it generates a WildcardQuery and PrefixQuery (it\r
+    already does so for TermRangeQuery, as well).  Call\r
+    setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE)\r
+    to revert to slower BooleanQuery rewriting method.  (Mark Miller via Mike\r
+    McCandless)\r
+    \r
+ * LUCENE-1575: As of 2.9, the core collectors as well as\r
+    IndexSearcher's search methods that return top N results, no\r
+    longer filter documents with scores <= 0.0. If you rely on this\r
+    functionality you can use PositiveScoresOnlyCollector like this:\r
+\r
+    <code>\r
+      TopDocsCollector tdc = new TopScoreDocCollector(10);\r
+      Collector c = new PositiveScoresOnlyCollector(tdc);\r
+      searcher.search(query, c);\r
+      TopDocs hits = tdc.topDocs();\r
+      ...\r
+    </code>\r
+\r
+ * LUCENE-1604: IndexReader.norms(String field) is now allowed to\r
+    return null if the field has no norms, as long as you've\r
+    previously called IndexReader.setDisableFakeNorms(true).  This\r
+    setting now defaults to false (to preserve the fake norms back\r
+    compatible behavior) but in 3.0 will be hardwired to true.  (Shon\r
+    Vella via Mike McCandless).\r
+\r
+ * LUCENE-1624: If you open IndexWriter with create=true and\r
+    autoCommit=false on an existing index, IndexWriter no longer\r
+    writes an empty commit when it's created.  (Paul Taylor via Mike\r
+    McCandless)\r
+\r
+ * LUCENE-1593: When you call Sort() or Sort.setSort(String field,\r
+    boolean reverse), the resulting SortField array no longer ends\r
+    with SortField.FIELD_DOC (it was unnecessary as Lucene breaks ties\r
+    internally by docID). (Shai Erera via Michael McCandless)\r
+\r
+ * LUCENE-1542: When the first token(s) have 0 position increment,\r
+    IndexWriter used to incorrectly record the position as -1, if no\r
+    payload is present, or Integer.MAX_VALUE if a payload is present.\r
+    This causes positional queries to fail to match.  The bug is now\r
+    fixed, but if your app relies on the buggy behavior then you must\r
+    call IndexWriter.setAllowMinus1Position().  That API is deprecated\r
+    so you must fix your application, and rebuild your index, to not\r
+    rely on this behavior by the 3.0 release of Lucene. (Jonathan\r
+    Mamou, Mark Miller via Mike McCandless)\r
+\r
+\r
+ * LUCENE-1715: Finalizers have been removed from the 4 core classes\r
+    that still had them, since they will cause GC to take longer, thus\r
+    tying up memory for longer, and at best they mask buggy app code.\r
+    DirectoryReader (returned from IndexReader.open) & IndexWriter\r
+    previously released the write lock during finalize.\r
+    SimpleFSDirectory.FSIndexInput closed the descriptor in its\r
+    finalizer, and NativeFSLock released the lock.  It's possible\r
+    applications will be affected by this, but only if the application\r
+    is failing to close reader/writers.  (Brian Groose via Mike\r
+    McCandless)\r
+\r
+ * LUCENE-1717: Fixed IndexWriter to account for RAM usage of\r
+    buffered deletions.  (Mike McCandless)\r
+\r
+ * LUCENE-1727: Ensure that fields are stored & retrieved in the\r
+    exact order in which they were added to the document.  This was\r
+    true in all Lucene releases before 2.3, but was broken in 2.3 and\r
+    2.4, and is now fixed in 2.9.  (Mike McCandless)\r
+\r
+ * LUCENE-1678: The addition of Analyzer.reusableTokenStream\r
+    accidentally broke back compatibility of external analyzers that\r
+    subclassed core analyzers that implemented tokenStream but not\r
+    reusableTokenStream.  This is now fixed, such that if\r
+    reusableTokenStream is invoked on such a subclass, that method\r
+    will forcefully fallback to tokenStream.  (Mike McCandless)\r
+    \r
+ * LUCENE-1801: Token.clear() and Token.clearNoTermBuffer() now also clear\r
+    startOffset, endOffset and type. This is not likely to affect any\r
+    Tokenizer chains, as Tokenizers normally always set these three values.\r
+    This change was made to conform to the new AttributeImpl.clear() and\r
+    AttributeSource.clearAttributes(), which work identically for Token, the\r
+    one-for-all AttributeImpl, and the 6 separate AttributeImpls. (Uwe Schindler, Michael Busch)\r
+\r
+ * LUCENE-1483: When searching over multiple segments, a new Scorer is now created \r
+    for each segment. Searching has been telescoped out a level and IndexSearcher now\r
+    operates much like MultiSearcher does. The Weight is created only once for the top \r
+    level Searcher, but each Scorer is passed a per-segment IndexReader. This will \r
+    result in doc ids in the Scorer being internal to the per-segment IndexReader. It \r
+    has always been outside of the API to count on a given IndexReader to contain every \r
+    doc id in the index - and if you have been ignoring MultiSearcher in your custom code \r
+    and counting on this fact, you will find your code no longer works correctly. If a \r
+    custom Scorer implementation uses any caches/filters that rely on being based on the \r
+    top level IndexReader, it will need to be updated to correctly use contextless \r
+    caches/filters, e.g. you can't count on the IndexReader to contain any given doc id or\r
+    all of the doc ids. (Mark Miller, Mike McCandless)\r
+\r
+ * LUCENE-1846: DateTools now uses the US locale to format the numbers in its\r
+    date/time strings instead of the default locale. For most locales there will\r
+    be no change in the index format, as DateFormatSymbols is using ASCII digits.\r
+    The usage of the US locale is important to guarantee correct ordering of\r
+    generated terms.  (Uwe Schindler)\r
+\r
+ * LUCENE-1860: MultiTermQuery now defaults to\r
+    CONSTANT_SCORE_AUTO_REWRITE_DEFAULT rewrite method (previously it\r
+    was SCORING_BOOLEAN_QUERY_REWRITE).  This means that PrefixQuery\r
+    and WildcardQuery will now produce constant score for all matching\r
+    docs, equal to the boost of the query.  (Mike McCandless)\r
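+\r
+    For apps that depend on the old behavior, a minimal opt-out sketch\r
+    (field/term are illustrative; usual org.apache.lucene imports assumed):\r
+\r
+      PrefixQuery q = new PrefixQuery(new Term("title", "luc"));\r
+      // restore the pre-2.9 scoring BooleanQuery expansion:\r
+      q.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);\r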
+\r
+API Changes\r
+\r
+ * LUCENE-1419: Add expert API to set custom indexing chain. This API is \r
+   package-protected for now, so we don't have to officially support it.\r
+   Yet, it will give us the possibility to try out different consumers\r
+   in the chain. (Michael Busch)\r
+\r
+ * LUCENE-1427: DocIdSet.iterator() is now allowed to throw\r
+   IOException.  (Paul Elschot, Mike McCandless)\r
+\r
+ * LUCENE-1422, LUCENE-1693: New TokenStream API that uses a new class called \r
+   AttributeSource instead of the Token class, which is now a utility class that\r
+   holds common Token attributes. All attributes that the Token class had have \r
+   been moved into separate classes: TermAttribute, OffsetAttribute, \r
+   PositionIncrementAttribute, PayloadAttribute, TypeAttribute and FlagsAttribute. \r
+   The new API is much more flexible; it allows combining the Attributes\r
+   arbitrarily and also defining custom Attributes. The new API has the same\r
+   performance as the old next(Token) approach. For conformance with this new \r
+   API Tee-/SinkTokenizer was deprecated and replaced by a new TeeSinkTokenFilter. \r
+   (Michael Busch, Uwe Schindler; additional contributions and bug fixes by \r
+   Daniel Shane, Doron Cohen)\r
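+\r
+   A minimal consumption sketch under the new API (analyzer, field and text\r
+   are illustrative; the casts reflect the pre-Java-5 signatures):\r
+\r
+     TokenStream ts = analyzer.tokenStream("body", new StringReader(text));\r
+     TermAttribute term = (TermAttribute) ts.addAttribute(TermAttribute.class);\r
+     OffsetAttribute off = (OffsetAttribute) ts.addAttribute(OffsetAttribute.class);\r
+     while (ts.incrementToken()) {\r
+       // token text and offsets are read from the attributes, not from a Token\r
+       System.out.println(term.term() + " @ " + off.startOffset());\r
+     }\r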
+\r
+ * LUCENE-1467: Add nextDoc() and next(int) methods to OpenBitSetIterator.\r
+   These methods can be used to avoid additional calls to doc(). \r
+   (Michael Busch)\r
+\r
+ * LUCENE-1468: Deprecate Directory.list(), which sometimes (in\r
+   FSDirectory) filters out files that don't look like index files, in\r
+   favor of new Directory.listAll(), which does no filtering.  Also,\r
+   listAll() will never return null; instead, it throws an IOException\r
+   (or subclass).  Specifically, FSDirectory.listAll() will throw the\r
+   newly added NoSuchDirectoryException if the directory does not\r
+   exist.  (Marcel Reutegger, Mike McCandless)\r
+\r
+ * LUCENE-1546: Add IndexReader.flush(Map commitUserData), allowing\r
+   you to record an opaque commitUserData (maps String -> String) into\r
+   the commit written by IndexReader.  This matches IndexWriter's\r
+   commit methods.  (Jason Rutherglen via Mike McCandless)\r
+\r
+ * LUCENE-652: Added org.apache.lucene.document.CompressionTools, to\r
+   enable compressing & decompressing binary content, external to\r
+   Lucene's indexing.  Deprecated Field.Store.COMPRESS.\r
+\r
+ * LUCENE-1561: Renamed Field.omitTf to Field.omitTermFreqAndPositions\r
+    (Otis Gospodnetic via Mike McCandless)\r
+  \r
+ * LUCENE-1500: Added new InvalidTokenOffsetsException to Highlighter methods\r
+    to denote issues when offsets in TokenStream tokens exceed the length of the\r
+    provided text.  (Mark Harwood)\r
+    \r
+ * LUCENE-1575, LUCENE-1483: HitCollector is now deprecated in favor of \r
+    a new Collector abstract class. For easy migration, people can use\r
+    HitCollectorWrapper which translates (wraps) HitCollector into\r
+    Collector. Note that this class is also deprecated and will be\r
+    removed when HitCollector is removed.  Also TimeLimitedCollector\r
+    is deprecated in favor of the new TimeLimitingCollector which\r
+    extends Collector.  (Shai Erera, Mark Miller, Mike McCandless)\r
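+\r
+    A minimal Collector sketch (hit counting only; names are illustrative):\r
+\r
+      public class CountingCollector extends Collector {\r
+        public int count;\r
+        public void setScorer(Scorer scorer) {}    // scores not needed here\r
+        public void collect(int doc) { count++; }  // doc is segment-relative\r
+        public void setNextReader(IndexReader reader, int docBase) {}\r
+        public boolean acceptsDocsOutOfOrder() { return true; }\r
+      }\r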
+\r
+ * LUCENE-1592: The method TermEnum.skipTo() was deprecated, because\r
+    it is used nowhere in core/contrib and there is only a very inefficient\r
+    default implementation available. If you want to position a TermEnum\r
+    to another Term, create a new one using IndexReader.terms(Term).\r
+    (Uwe Schindler)\r
+\r
+ * LUCENE-1621: MultiTermQuery.getTerm() has been deprecated as it does\r
+    not make sense for all subclasses of MultiTermQuery. Check individual\r
+    subclasses to see if they support getTerm().  (Mark Miller)\r
+\r
+ * LUCENE-1636: Make TokenFilter.input final so it's set only\r
+    once. (Wouter Heijke, Uwe Schindler via Mike McCandless).\r
+\r
+ * LUCENE-1658, LUCENE-1451: Renamed FSDirectory to SimpleFSDirectory\r
+    (but left an FSDirectory base class).  Added an FSDirectory.open\r
+    static method to pick a good default FSDirectory implementation\r
+    given the OS. FSDirectories should now be instantiated using\r
+    FSDirectory.open or with public constructors rather than\r
+    FSDirectory.getDirectory(), which has been deprecated.\r
+    (Michael McCandless, Uwe Schindler, yonik)\r
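+\r
+    A minimal usage sketch (path is illustrative):\r
+\r
+      Directory dir = FSDirectory.open(new File("/path/to/index"));\r
+      IndexReader reader = IndexReader.open(dir, true);   // read-only\r
+      IndexSearcher searcher = new IndexSearcher(reader);\r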
+\r
+ * LUCENE-1665: Deprecate SortField.AUTO, to be removed in 3.0.\r
+    Instead, when sorting by field, the application should explicitly\r
+    state the type of the field.  (Mike McCandless)\r
+\r
+ * LUCENE-1660: StopFilter, StandardAnalyzer, StopAnalyzer now\r
+    require up front specification of enablePositionIncrement (Mike\r
+    McCandless)\r
+\r
+ * LUCENE-1614: DocIdSetIterator's next() and skipTo() were deprecated in favor\r
+    of the new nextDoc() and advance(). The new methods return the doc Id they \r
+    landed on, saving an extra call to doc() in most cases.\r
+    For easy migration of the code, you can change the calls to next() to \r
+    nextDoc() != DocIdSetIterator.NO_MORE_DOCS and similarly for skipTo(). \r
+    However it is advised that you take advantage of the returned doc ID and not \r
+    call doc() following those two.\r
+    Also, doc() was deprecated in favor of docID(). docID() should return -1 or \r
+    NO_MORE_DOCS if nextDoc/advance were not called yet, or NO_MORE_DOCS if the \r
+    iterator has exhausted. Otherwise it should return the current doc ID.\r
+    (Shai Erera via Mike McCandless)\r
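+\r
+    A minimal migration sketch (the iterator variable is illustrative):\r
+\r
+      // old: while (iter.next()) { int id = iter.doc(); ... }\r
+      int doc;\r
+      while ((doc = iter.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {\r
+        // use the returned doc directly; no extra docID() call needed\r
+      }\r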
+\r
+ * LUCENE-1672: All ctors/opens and other methods using String/File to\r
+    specify the directory in IndexReader, IndexWriter, and IndexSearcher\r
+    were deprecated. You should instantiate the Directory manually before\r
+    and pass it to these classes (LUCENE-1451, LUCENE-1658).\r
+    (Uwe Schindler)\r
+\r
+ * LUCENE-1407: Move RemoteSearchable, RemoteCachingWrapperFilter out\r
+    of Lucene's core into new contrib/remote package.  Searchable no\r
+    longer extends java.rmi.Remote (Simon Willnauer via Mike\r
+    McCandless)\r
+\r
+ * LUCENE-1677: The global property\r
+    org.apache.lucene.SegmentReader.class, and\r
+    ReadOnlySegmentReader.class are now deprecated, to be removed in\r
+    3.0.  src/gcj/* has been removed. (Earwin Burrfoot via Mike\r
+    McCandless)\r
+\r
+ * LUCENE-1673: Deprecated NumberTools in favour of the new\r
+    NumericRangeQuery and its new indexing format for numeric or\r
+    date values.  (Uwe Schindler)\r
+    \r
+ * LUCENE-1630, LUCENE-1771: Weight is now an abstract class, and adds\r
+    a scorer(IndexReader, boolean /* scoreDocsInOrder */, boolean /*\r
+    topScorer */) method instead of scorer(IndexReader). IndexSearcher uses \r
+    this method to obtain a scorer matching the capabilities of the Collector\r
+    with respect to the ordering of docIDs. Some Scorers (like BooleanScorer)\r
+    are much more efficient if out-of-order document scoring is allowed by a\r
+    Collector.  Collector must now implement acceptsDocsOutOfOrder. If you\r
+    write a Collector which does not care about doc ID ordering, it is\r
+    recommended that you return true.  Weight has a scoresDocsOutOfOrder\r
+    method, which by\r
+    default returns false.  If you create a Weight which will score documents \r
+    out of order if requested, you should override that method to return true. \r
+    BooleanQuery's setAllowDocsOutOfOrder and getAllowDocsOutOfOrder have been \r
+    deprecated as they are not needed anymore. BooleanQuery will now score docs \r
+    out of order when used with a Collector that can accept docs out of order.\r
+    Finally, Weight#explain now takes a sub-reader and sub-docID, rather than\r
+    a top level reader and docID.\r
+    (Shai Erera, Chris Hostetter, Martin Ruckli, Mark Miller via Mike McCandless)\r
+       \r
+ * LUCENE-1466, LUCENE-1906: Added CharFilter and MappingCharFilter, which allows\r
+    chaining & mapping of characters before tokenizers run. CharStream (subclass of\r
+    Reader) is the base class for custom java.io.Readers that support offset\r
+    correction. Tokenizers got an additional method correctOffset() that is passed\r
+    down to the underlying CharStream if input is a subclass of CharStream/-Filter.\r
+    (Koji Sekiguchi via Mike McCandless, Uwe Schindler)\r
+\r
+ * LUCENE-1703: Add IndexWriter.waitForMerges.  (Tim Smith via Mike\r
+    McCandless)\r
+\r
+ * LUCENE-1625: CheckIndex's programmatic API now returns separate\r
+    classes detailing the status of each component in the index, and\r
+    includes more detailed status than previously.  (Tim Smith via\r
+    Mike McCandless)\r
+\r
+ * LUCENE-1713: Deprecated RangeQuery and RangeFilter and renamed to\r
+    TermRangeQuery and TermRangeFilter. TermRangeQuery is in constant\r
+    score auto rewrite mode by default. The new classes also have new\r
+    ctors taking field and term ranges as Strings (see also\r
+    LUCENE-1424).  (Uwe Schindler)\r
+\r
+ * LUCENE-1609: The termInfosIndexDivisor must now be specified\r
+    up-front when opening the IndexReader.  Attempts to call\r
+    IndexReader.setTermInfosIndexDivisor will hit an\r
+    UnsupportedOperationException.  This was done to enable removal of\r
+    all synchronization in TermInfosReader, which previously could\r
+    cause threads to pile up in certain cases. (Dan Rosher via Mike\r
+    McCandless)\r
+    \r
+ * LUCENE-1688: Deprecate the static final String stop word array in\r
+    StopAnalyzer and replace it with an immutable implementation of\r
+    CharArraySet.  (Simon Willnauer via Mark Miller)\r
+\r
+ * LUCENE-1742: SegmentInfos, SegmentInfo and SegmentReader have been\r
+    made public as expert, experimental APIs.  These APIs may suddenly\r
+    change from release to release (Jason Rutherglen via Mike\r
+    McCandless).\r
+    \r
+ * LUCENE-1754: QueryWeight.scorer() can return null if no documents\r
+    are going to be matched by the query. Similarly,\r
+    Filter.getDocIdSet() can return null if no documents are going to\r
+    be accepted by the Filter. Note that these 'can' return null,\r
+    however they don't have to and can return a Scorer/DocIdSet which\r
+    does not match / reject all documents.  This is already the\r
+    behavior of some QueryWeight/Filter implementations, and is\r
+    documented here just for emphasis. (Shai Erera via Mike\r
+    McCandless)\r
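+\r
+    A minimal defensive-caller sketch (weight, reader and filter are\r
+    illustrative):\r
+\r
+      Scorer scorer = weight.scorer(reader, true, true);\r
+      if (scorer == null)\r
+        return;                     // the query matches no documents\r
+      DocIdSet set = filter.getDocIdSet(reader);\r
+      if (set == null)\r
+        return;                     // the filter accepts no documents\r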
+\r
+ * LUCENE-1705: Added IndexWriter.deleteAllDocuments.  (Tim Smith via\r
+    Mike McCandless)\r
+\r
+ * LUCENE-1460: Changed TokenStreams/TokenFilters in contrib to\r
+    use the new TokenStream API. (Robert Muir, Michael Busch)\r
+\r
+ * LUCENE-1748: LUCENE-1001 introduced PayloadSpans, but this was a back\r
+    compat break and caused custom SpanQuery implementations to fail at runtime\r
+    in a variety of ways. This issue attempts to remedy things by causing\r
+    a compile time break on custom SpanQuery implementations and removing \r
+    the PayloadSpans class, with its functionality now moved to Spans. To\r
+    help in alleviating future back compat pain, Spans has been changed from\r
+    an interface to an abstract class.\r
+    (Hugh Cayless, Mark Miller)\r
+    \r
+ * LUCENE-1808: Query.createWeight has been changed from protected to\r
+    public. (Tim Smith, Shai Erera via Mark Miller)\r
+\r
+ * LUCENE-1826: Add constructors that take AttributeSource and\r
+    AttributeFactory to all Tokenizer implementations.\r
+    (Michael Busch)\r
+    \r
+ * LUCENE-1847: Similarity#idf for both a Term and a collection of Terms\r
+    has been deprecated. New versions that return an IDFExplanation have been\r
+    added.  (Yasoja Seneviratne, Mike McCandless, Mark Miller)\r
+    \r
+ * LUCENE-1877: Made NativeFSLockFactory the default for\r
+    the new FSDirectory API (open(), FSDirectory subclass ctors).\r
+    All FSDirectory system properties were deprecated and all lock\r
+    implementations use no lock prefix if the locks are stored inside\r
+    the index directory. Because the deprecated String/File ctors of\r
+    IndexWriter and IndexReader (LUCENE-1672) and FSDirectory.getDirectory()\r
+    still use the old SimpleFSLockFactory while the new API uses\r
+    NativeFSLockFactory, we strongly recommend not mixing the deprecated\r
+    and the new API. (Uwe Schindler, Mike McCandless)\r
+\r
+ * LUCENE-1911: Added a new method isCacheable() to DocIdSet. This method\r
+    should return true, if the underlying implementation does not use disk\r
+    I/O and is fast enough to be directly cached by CachingWrapperFilter.\r
+    OpenBitSet, SortedVIntList, and DocIdBitSet are such candidates.\r
+    The default implementation of the abstract DocIdSet class returns false.\r
+    In this case, CachingWrapperFilter copies the DocIdSetIterator into an\r
+    OpenBitSet for caching.  (Uwe Schindler, Thomas Becker)\r
+\r
+Bug fixes\r
+\r
+ * LUCENE-1415: MultiPhraseQuery had incorrect hashCode() and equals()\r
+   implementations, leading to Solr cache misses.\r
+   (Todd Feak, Mark Miller via yonik)\r
+\r
+ * LUCENE-1327: Fix TermSpans#skipTo() to behave as specified in javadocs\r
+   of Spans#skipTo(). (Michael Busch)\r
+\r
+ * LUCENE-1573: Do not ignore InterruptedException (caused by\r
+   Thread.interrupt()) nor enter deadlock/spin loop. Now, an interrupt\r
+   will cause a RuntimeException to be thrown.  In 3.0 we will change\r
+   public APIs to throw InterruptedException.  (Jeremy Volkman via\r
+   Mike McCandless)\r
+\r
+ * LUCENE-1590: Fixed stored-only Field instances so that they do not change\r
+   the value of omitNorms, omitTermFreqAndPositions in FieldInfo; when you\r
+   retrieve such fields they will now have omitNorms=true and\r
+   omitTermFreqAndPositions=false (though these values are unused).\r
+   (Uwe Schindler via Mike McCandless)\r
+\r
+ * LUCENE-1587: RangeQuery#equals() could consider a RangeQuery\r
+   without a collator equal to one with a collator.\r
+   (Mark Platvoet via Mark Miller) \r
+\r
+ * LUCENE-1600: Don't call String.intern unnecessarily in some cases\r
+   when loading documents from the index.  (P Eger via Mike\r
+   McCandless)\r
+\r
+ * LUCENE-1611: Fix case where OutOfMemoryException in IndexWriter\r
+   could cause "infinite merging" to happen.  (Christiaan Fluit via\r
+   Mike McCandless)\r
+\r
+ * LUCENE-1623: Properly handle back-compatibility of 2.3.x indexes that\r
+   contain field names with non-ascii characters.  (Mike Streeton via\r
+   Mike McCandless)\r
+\r
+ * LUCENE-1593: MultiSearcher and ParallelMultiSearcher did not break ties (in \r
+   sort) by doc Id in a consistent manner (i.e., if Sort.FIELD_DOC was used vs. \r
+   when it wasn't). (Shai Erera via Michael McCandless)\r
+\r
+ * LUCENE-1647: Fix case where IndexReader.undeleteAll would cause\r
+    the segment's deletion count to be incorrect. (Mike McCandless)\r
+\r
+ * LUCENE-1542: When the first token(s) have 0 position increment,\r
+    IndexWriter used to incorrectly record the position as -1, if no\r
+    payload is present, or Integer.MAX_VALUE if a payload is present.\r
+    This causes positional queries to fail to match.  The bug is now\r
+    fixed, but if your app relies on the buggy behavior then you must\r
+    call IndexWriter.setAllowMinus1Position().  That API is deprecated\r
+    so you must fix your application, and rebuild your index, to not\r
+    rely on this behavior by the 3.0 release of Lucene. (Jonathan\r
+    Mamou, Mark Miller via Mike McCandless)\r
+\r
+ * LUCENE-1658: Fixed MMapDirectory to correctly throw IOExceptions\r
+    on EOF, removed numeric overflow possibilities and added support\r
+    for a hack to unmap the buffers on closing IndexInput.\r
+    (Uwe Schindler)\r
+    \r
+ * LUCENE-1681: Fix infinite loop caused by a call to DocValues methods \r
+    getMinValue, getMaxValue, getAverageValue. (Simon Willnauer via Mark Miller)\r
+\r
+ * LUCENE-1599: Add clone support for SpanQuerys. SpanRegexQuery counts\r
+    on this functionality and does not work correctly without it.\r
+    (Billow Gao, Mark Miller)\r
+\r
+ * LUCENE-1718: Fix termInfosIndexDivisor to carry over to reopened\r
+    readers (Mike McCandless)\r
+    \r
+ * LUCENE-1583: SpanOrQuery skipTo() doesn't always move forwards as Spans\r
+    documentation indicates it should.  (Moti Nisenson via Mark Miller)\r
+\r
+ * LUCENE-1566: Sun JVM Bug\r
+    http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6478546 causes\r
+    invalid OutOfMemoryError when reading too many bytes at once from\r
+    a file on 32bit JVMs that have a large maximum heap size.  This\r
+    fix adds set/getReadChunkSize to FSDirectory so that large reads\r
+    are broken into chunks, to work around this JVM bug.  On 32bit\r
+    JVMs the default chunk size is 100 MB; on 64bit JVMs, which don't\r
+    show the bug, the default is Integer.MAX_VALUE. (Simon Willnauer\r
+    via Mike McCandless)\r
+    \r
+ * LUCENE-1448: Added TokenStream.end() to perform end-of-stream\r
+    operations (ie to return the end offset of the tokenization).  \r
+    This is important when multiple fields with the same name are added\r
+    to a document, to ensure offsets recorded in term vectors for all \r
+    of the instances are correct.  \r
+    (Mike McCandless, Mark Miller, Michael Busch)\r
+\r
+ * LUCENE-1805: CloseableThreadLocal did not allow a null Object in get(), \r
+    although it does allow it in set(Object). Fix get() to not assert the object\r
+    is not null. (Shai Erera via Mike McCandless)\r
+    \r
+ * LUCENE-1801: Changed all Tokenizers or TokenStreams in core/contrib\r
+    that are the source of Tokens to always call\r
+    AttributeSource.clearAttributes() first. (Uwe Schindler)\r
+    \r
+ * LUCENE-1819: MatchAllDocsQuery.toString(field) should produce output\r
+    that is parsable by the QueryParser.  (John Wang, Mark Miller)\r
+\r
+ * LUCENE-1836: Fix localization bug in the new query parser and add \r
+    new LocalizedTestCase as base class for localization junit tests.\r
+    (Robert Muir, Uwe Schindler via Michael Busch)\r
+\r
+ * LUCENE-1847: PhraseQuery/TermQuery/SpanQuery use IndexReader specific stats \r
+    in their Weight#explain methods - these stats should be corpus wide.\r
+    (Yasoja Seneviratne, Mike McCandless, Mark Miller)\r
+\r
+ * LUCENE-1885: Fix the bug that NativeFSLock.isLocked() did not work,\r
+    if the lock was obtained by another NativeFSLock(Factory) instance.\r
+    Because of this IndexReader.isLocked() and IndexWriter.isLocked() did\r
+    not work correctly.  (Uwe Schindler)\r
+\r
+ * LUCENE-1899: Fix O(N^2) CPU cost when setting docIDs in order in an\r
+    OpenBitSet, due to an inefficiency in how the underlying storage is\r
+    reallocated.  (Nadav Har'El via Mike McCandless)\r
+\r
+ * LUCENE-1918: Fixed cases where a ParallelReader would\r
+   generate exceptions on being passed to\r
+   IndexWriter.addIndexes(IndexReader[]).  First case was when the\r
+   ParallelReader was empty.  Second case was when the ParallelReader\r
+   used to contain documents with TermVectors, but all such documents\r
+   have been deleted. (Christian Kohlschütter via Mike McCandless)\r
+\r
+New features\r
+\r
+ * LUCENE-1411: Added expert API to open an IndexWriter on a prior\r
+    commit, obtained from IndexReader.listCommits.  This makes it\r
+    possible to rollback changes to an index even after you've closed\r
+    the IndexWriter that made the changes, assuming you are using an\r
+    IndexDeletionPolicy that keeps past commits around.  This is useful\r
+    when building transactional support on top of Lucene.  (Mike\r
+    McCandless)\r
+\r
+ * LUCENE-1382: Add an optional arbitrary Map (String -> String)\r
+    "commitUserData" to IndexWriter.commit(), which is stored in the\r
+    segments file and is then retrievable via\r
+    IndexReader.getCommitUserData instance and static methods.\r
+    (Shalin Shekhar Mangar via Mike McCandless)\r
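+\r
+    A minimal sketch (key and value are illustrative; the raw Map matches\r
+    the pre-Java-5 signatures):\r
+\r
+      Map userData = new HashMap();         // String -> String\r
+      userData.put("sourceVersion", "42");\r
+      writer.commit(userData);\r
+      // later, without opening a full reader:\r
+      Map last = IndexReader.getCommitUserData(dir);\r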
+\r
+ * LUCENE-1420: Similarity now has a computeNorm method that allows\r
+    custom Similarity classes to override how norm is computed.  It's\r
+    provided a FieldInvertState instance that contains details from\r
+    inverting the field.  The default impl is boost *\r
+    lengthNorm(numTerms), to be backwards compatible.  Also added\r
+    {set/get}DiscountOverlaps to DefaultSimilarity, to control whether\r
+    overlapping tokens (tokens with 0 position increment) should be\r
+    counted in lengthNorm.  (Andrzej Bialecki via Mike McCandless)\r
+\r
+ * LUCENE-1424: Moved constant score query rewrite capability into\r
+    MultiTermQuery, allowing TermRangeQuery, PrefixQuery and WildcardQuery\r
+    to switch between constant-score rewriting or BooleanQuery\r
+    expansion rewriting via a new setRewriteMethod method.\r
+    Deprecated ConstantScoreRangeQuery (Mark Miller via Mike\r
+    McCandless)\r
+\r
+ * LUCENE-1461: Added FieldCacheRangeFilter, a RangeFilter for\r
+    single-term fields that uses FieldCache to compute the filter.  If\r
+    your documents all have a single term for a given field, and you\r
+    need to create many RangeFilters with varying lower/upper bounds,\r
+    then this is likely a much faster way to create the filters than\r
+    RangeFilter.  FieldCacheRangeFilter allows ranges on all data types that\r
+    FieldCache supports (term ranges, byte, short, int, long, float, double).\r
+    However, it comes at the expense of added RAM consumption and slower\r
+    first-time usage due to populating the FieldCache.  It also does not\r
+    support collation.  (Tim Sturge, Matt Ericson via Mike McCandless and\r
+    Uwe Schindler)\r
+\r
+ * LUCENE-1296: add protected method CachingWrapperFilter.docIdSetToCache \r
+    to allow subclasses to choose which DocIdSet implementation to use\r
+    (Paul Elschot via Mike McCandless)\r
+    \r
+ * LUCENE-1390: Added ASCIIFoldingFilter, a Filter that converts \r
+    alphabetic, numeric, and symbolic Unicode characters which are not in \r
+    the first 127 ASCII characters (the "Basic Latin" Unicode block) into \r
+    their ASCII equivalents, if one exists. ISOLatin1AccentFilter, which\r
+    handles a subset of this filter, has been deprecated.\r
+    (Andi Vajda, Steven Rowe via Mark Miller)\r
+\r
+ * LUCENE-1478: Added new SortField constructor allowing you to\r
+    specify a custom FieldCache parser to generate numeric values from\r
+    terms for a field.  (Uwe Schindler via Mike McCandless)\r
+\r
+ * LUCENE-1528: Add support for Ideographic Space to the queryparser.\r
+    (Luis Alves via Michael Busch)\r
+\r
+ * LUCENE-1487: Added FieldCacheTermsFilter, to filter by multiple\r
+    terms on single-valued fields.  The filter loads the FieldCache\r
+    for the field the first time it's called, and subsequent usage of\r
+    that field, even with different Terms in the filter, are fast.\r
+    (Tim Sturge, Shalin Shekhar Mangar via Mike McCandless).\r
+\r
+ * LUCENE-1314: Add clone(), clone(boolean readOnly) and\r
+    reopen(boolean readOnly) to IndexReader.  Cloning an IndexReader\r
+    gives you a new reader which you can make changes to (deletions,\r
+    norms) without affecting the original reader.  Now, with clone or\r
+    reopen you can change the readOnly of the original reader.  (Jason\r
+    Rutherglen, Mike McCandless)\r
+\r
+ * LUCENE-1506: Added FilteredDocIdSet, an abstract class which you\r
+    subclass to implement the "match" method to accept or reject each\r
+    docID.  Unlike ChainedFilter (under contrib/misc),\r
+    FilteredDocIdSet never requires you to materialize the full\r
+    bitset.  Instead, match() is called on demand per docID.  (John\r
+    Wang via Mike McCandless)\r
+\r
+ * LUCENE-1398: Add ReverseStringFilter to contrib/analyzers, a filter\r
+    to reverse the characters in each token.  (Koji Sekiguchi via yonik)\r
+\r
+ * LUCENE-1551: Add expert IndexReader.reopen(IndexCommit) to allow\r
+    efficiently opening a new reader on a specific commit, sharing\r
+    resources with the original reader.  (Torin Danil via Mike\r
+    McCandless)\r
+\r
+ * LUCENE-1434: Added org.apache.lucene.util.IndexableBinaryStringTools,\r
+    to encode byte[] as String values that are valid terms, and\r
+    maintain sort order of the original byte[] when the bytes are\r
+    interpreted as unsigned.  (Steven Rowe via Mike McCandless)\r
+\r
+ * LUCENE-1543: Allow MatchAllDocsQuery to optionally use norms from\r
+    a specific field to set the score for a document.  (Karl Wettin\r
+    via Mike McCandless)\r
+\r
+ * LUCENE-1586: Add IndexReader.getUniqueTermCount().  (Mike\r
+    McCandless via Derek)\r
+\r
+ * LUCENE-1516: Added "near real-time search" to IndexWriter, via a\r
+    new expert getReader() method.  This method returns a reader that\r
+    searches the full index, including any uncommitted changes in the\r
+    current IndexWriter session.  This should result in a faster\r
+    turnaround than the normal approach of committing the changes and\r
+    then reopening a reader.  (Jason Rutherglen via Mike McCandless)\r
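+\r
+    A minimal near-real-time sketch (writer and doc are illustrative):\r
+\r
+      writer.addDocument(doc);                // not yet committed\r
+      IndexReader nrt = writer.getReader();   // already sees the new doc\r
+      IndexSearcher searcher = new IndexSearcher(nrt);\r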
+\r
+ * LUCENE-1603: Added new MultiTermQueryWrapperFilter, to wrap any\r
+    MultiTermQuery as a Filter.  Also made some improvements to\r
+    MultiTermQuery: return DocIdSet.EMPTY_DOCIDSET if there are no\r
+    terms in the enum; track the total number of terms it visited\r
+    during rewrite (getTotalNumberOfTerms).  FilteredTermEnum is also\r
+    more friendly to subclassing.  (Uwe Schindler via Mike McCandless)\r
+\r
+ * LUCENE-1605: Added BitVector.subset().  (Jeremy Volkman via Mike\r
+    McCandless)\r
+    \r
+ * LUCENE-1618: Added FileSwitchDirectory that enables files with\r
+    specified extensions to be stored in a primary directory and the\r
+    rest of the files to be stored in the secondary directory.  For\r
+    example, this can be useful for the large doc-store (stored\r
+    fields, term vectors) files in FSDirectory and the rest of the\r
+    index files in a RAMDirectory. (Jason Rutherglen via Mike\r
+    McCandless)\r
+\r
+ * LUCENE-1494: Added FieldMaskingSpanQuery which can be used to\r
+    cross-correlate Spans from different fields.\r
+    (Paul Cowan and Chris Hostetter)\r
+\r
+ * LUCENE-1634: Add calibrateSizeByDeletes to LogMergePolicy, to take\r
+    deletions into account when considering merges.  (Yasuhiro Matsuda\r
+    via Mike McCandless)\r
+\r
+ * LUCENE-1550: Added new n-gram based String distance measure for spell checking.\r
+    See the Javadocs for NGramDistance.java for a reference paper on why\r
+    this is helpful.  (Tom Morton via Grant Ingersoll)\r
+\r
+ * LUCENE-1470, LUCENE-1582, LUCENE-1602, LUCENE-1673, LUCENE-1701, LUCENE-1712:\r
+    Added NumericRangeQuery and NumericRangeFilter, a fast alternative to\r
+    RangeQuery/RangeFilter for numeric searches. They depend on a specific\r
+    structure of terms in the index that can be created by indexing\r
+    using the new NumericField or NumericTokenStream classes. NumericField\r
+    can only be used for indexing and optionally stores the values as\r
+    string representation in the doc store. Documents returned from\r
+    IndexReader/IndexSearcher will return only the String value using\r
+    the standard Fieldable interface. NumericFields can be sorted on\r
+    and loaded into the FieldCache.  (Uwe Schindler, Yonik Seeley,\r
+    Mike McCandless)\r
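+\r
+    A minimal indexing/search sketch (field name and bounds are\r
+    illustrative; wrapper objects match the pre-Java-5 signatures):\r
+\r
+      doc.add(new NumericField("price", Field.Store.YES, true)\r
+          .setDoubleValue(9.99));\r
+      // ...\r
+      Query q = NumericRangeQuery.newDoubleRange("price",\r
+          new Double(5.0), new Double(10.0), true, true);\r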
+\r
+ * LUCENE-1405: Added support for Ant resource collections in contrib/ant\r
+    <index> task.  (Przemyslaw Sztoch via Erik Hatcher)\r
+\r
+ * LUCENE-1699: Allow setting a TokenStream on Field/Fieldable for indexing\r
+    in conjunction with any other ways to specify stored field values,\r
+    currently binary or string values.  (yonik)\r
+    \r
+ * LUCENE-1701: Made the standard FieldCache.Parsers public and added\r
+    parsers for fields generated using NumericField/NumericTokenStream.\r
+    All standard parsers now also implement Serializable and enforce\r
+    their singleton status.  (Uwe Schindler, Mike McCandless)\r
+    \r
+ * LUCENE-1741: User configurable maximum chunk size in MMapDirectory.\r
+    On 32 bit platforms, the address space can be very fragmented, so\r
+    one big ByteBuffer for the whole file may not fit into address space.\r
+    (Eks Dev via Uwe Schindler)\r
+\r
+ * LUCENE-1644: Enable 4 rewrite modes for queries deriving from\r
+    MultiTermQuery (WildcardQuery, PrefixQuery, TermRangeQuery,\r
+    NumericRangeQuery): CONSTANT_SCORE_FILTER_REWRITE first creates a\r
+    filter and then assigns constant score (boost) to docs;\r
+    CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE creates a BooleanQuery but\r
+    uses a constant score (boost); SCORING_BOOLEAN_QUERY_REWRITE also\r
+    creates a BooleanQuery but keeps the BooleanQuery's scores;\r
+    CONSTANT_SCORE_AUTO_REWRITE tries to pick the most performant\r
+    constant-score rewrite method.  (Mike McCandless)\r
+    \r
+ * LUCENE-1448: Added TokenStream.end(), to perform end-of-stream\r
+    operations.  This is currently used to fix offset problems when \r
+    multiple fields with the same name are added to a document.\r
+    (Mike McCandless, Mark Miller, Michael Busch)\r
\r
+ * LUCENE-1776: Add an option to not collect payloads for an ordered\r
+    SpanNearQuery. Payloads were not lazily loaded in this case as\r
+    the javadocs implied. If you have payloads and want to use an ordered\r
+    SpanNearQuery that does not need to use the payloads, you can\r
+    disable loading them with a new constructor switch.  (Mark Miller)\r
+\r
+ * LUCENE-1341: Added PayloadNearQuery to enable SpanNearQuery functionality\r
+    with payloads (Peter Keegan, Grant Ingersoll, Mark Miller)\r
+\r
+ * LUCENE-1790: Added PayloadTermQuery to enable scoring of payloads\r
+    based on the maximum payload seen for a document.\r
+    Slight refactoring of Similarity and other payload queries (Grant Ingersoll, Mark Miller)\r
+\r
+ * LUCENE-1749: Addition of FieldCacheSanityChecker utility, and\r
+    hooks to use it in all existing Lucene Tests.  This class can\r
+    be used by any application to inspect the FieldCache and provide\r
+    diagnostic information about the possibility of inconsistent\r
+    FieldCache usage.  Namely: FieldCache entries for the same field\r
+    with different datatypes or parsers; and FieldCache entries for\r
+    the same field in both a reader, and one of its (descendant) sub\r
+    readers. \r
+    (Chris Hostetter, Mark Miller)\r
+\r
+ * LUCENE-1789: Added utility class\r
+    oal.search.function.MultiValueSource to ease the transition to\r
+    segment based searching for any apps that directly call\r
+    oal.search.function.* APIs.  This class wraps any other\r
+    ValueSource, but takes care when composite (multi-segment) readers are\r
+    passed to not double RAM usage in the FieldCache.  (Chris\r
+    Hostetter, Mark Miller, Mike McCandless)\r
+   \r
+Optimizations\r
+\r
+ * LUCENE-1427: Fixed QueryWrapperFilter to not waste time computing\r
+    scores of the query, since they are just discarded.  Also, made it\r
+    more efficient (single pass) by not creating & populating an\r
+    intermediate OpenBitSet (Paul Elschot, Mike McCandless)\r
+\r
+ * LUCENE-1443: Performance improvement for OpenBitSetDISI.inPlaceAnd()\r
+    (Paul Elschot via yonik)\r
+\r
+ * LUCENE-1484: Remove synchronization of IndexReader.document() by\r
+    using CloseableThreadLocal internally.  (Jason Rutherglen via Mike\r
+    McCandless).\r
+    \r
+ * LUCENE-1124: Short circuit FuzzyQuery.rewrite when input token length \r
+    is small compared to minSimilarity. (Timo Nentwig, Mark Miller)\r
+\r
+ * LUCENE-1316: MatchAllDocsQuery now avoids the synchronized\r
+    IndexReader.isDeleted() call per document, by directly accessing\r
+    the underlying deleteDocs BitVector.  This improves performance\r
+    with non-readOnly readers, especially in a multi-threaded\r
+    environment.  (Todd Feak, Yonik Seeley, Jason Rutherglen via Mike\r
+    McCandless)\r
+\r
+ * LUCENE-1483: When searching over multiple segments we now visit\r
+    each sub-reader one at a time.  This speeds up warming, since\r
+    FieldCache entries (if required) can be shared across reopens for\r
+    those segments that did not change, and also speeds up searches\r
+    that sort by relevance or by field values.  (Mark Miller, Mike\r
+    McCandless)\r
+    \r
+ * LUCENE-1575: The new Collector class decouples collect() from\r
+    score computation.  Collector.setScorer is called to establish the\r
+    current Scorer in-use per segment.  Collectors that require the\r
+    score should then call Scorer.score() per hit inside\r
+    collect(). (Shai Erera via Mike McCandless)\r
+\r
+ * LUCENE-1596: MultiTermDocs speedup when set with\r
+    MultiTermDocs.seek(MultiTermEnum) (yonik)\r
+    \r
+ * LUCENE-1653: Avoid creating a Calendar in every call to \r
+    DateTools#dateToString, DateTools#timeToString and\r
+    DateTools#round.  (Shai Erera via Mark Miller)\r
+    \r
+ * LUCENE-1688: Deprecate static final String stop word array and \r
+    replace it with an immutable implementation of CharArraySet.\r
+    Removes conversions between Set and array.\r
+    (Simon Willnauer via Mark Miller)\r
+\r
+ * LUCENE-1754: BooleanQuery.queryWeight.scorer() will return null if\r
+    it won't match any documents (e.g. if there are no required and\r
+    optional scorers, or not enough optional scorers to satisfy\r
+    minShouldMatch).  (Shai Erera via Mike McCandless)\r
+\r
+ * LUCENE-1607: To speed up string interning for commonly used\r
+    strings, the StringHelper.intern() interface was added with a\r
+    default implementation that uses a lockless cache.\r
+    (Earwin Burrfoot, yonik)\r
+\r
+ * LUCENE-1800: QueryParser should use reusable TokenStreams. (yonik)\r
+    \r
+\r
+Documentation\r
+\r
+ * LUCENE-1908: Scoring documentation improvements in Similarity javadocs.\r
+   (Mark Miller, Shai Erera, Ted Dunning, Jiri Kuhn, Marvin Humphrey, Doron Cohen)\r
+    \r
+ * LUCENE-1872: NumericField javadoc improvements\r
+    (Michael McCandless, Uwe Schindler)\r
\r
+ * LUCENE-1875: Make TokenStream.end javadoc less confusing.\r
+    (Uwe Schindler)\r
+\r
+ * LUCENE-1862: Rectified duplicate package level javadocs for\r
+    o.a.l.queryParser and o.a.l.analysis.cn.\r
+    (Chris Hostetter)\r
+\r
+ * LUCENE-1886: Improved hyperlinking in key Analysis javadocs\r
+    (Bernd Fondermann via Chris Hostetter)\r
+\r
+ * LUCENE-1884: massive javadoc and comment cleanup, primarily dealing with\r
+    typos.\r
+    (Robert Muir via Chris Hostetter)\r
+    \r
+ * LUCENE-1898: Switch changes to use bullets rather than numbers and \r
+    update changes-to-html script to handle the new format. \r
+    (Steven Rowe, Mark Miller)\r
+    \r
+ * LUCENE-1900: Improve Searchable Javadoc.\r
+    (Nadav Har'El, Doron Cohen, Marvin Humphrey, Mark Miller)\r
+    \r
+ * LUCENE-1896: Improve Similarity#queryNorm javadocs.\r
+    (Jiri Kuhn, Mark Miller)\r
+\r
+Build\r
+\r
+ * LUCENE-1440: Add new targets to build.xml that allow downloading\r
+    and executing the junit testcases from an older release for\r
+    backwards-compatibility testing. (Michael Busch)\r
+\r
+ * LUCENE-1446: Add compatibility tag to common-build.xml and run \r
+    backwards-compatibility tests in the nightly build. (Michael Busch)\r
+\r
+ * LUCENE-1529: Properly test "drop-in" replacement of jar with \r
+    backwards-compatibility tests. (Mike McCandless, Michael Busch)\r
+\r
+ * LUCENE-1851: Change 'javacc' and 'clean-javacc' targets to build\r
+    and clean contrib/surround files. (Luis Alves via Michael Busch)\r
+\r
+ * LUCENE-1854: tar task should use longfile="gnu" to avoid false file\r
+    name length warnings.  (Mark Miller)\r
+\r
+Test Cases\r
+\r
+ * LUCENE-1791: Enhancements to the QueryUtils and CheckHits utility \r
+    classes to wrap IndexReaders and Searchers in MultiReaders or \r
+    MultiSearcher when possible to help exercise more edge cases.\r
+    (Chris Hostetter, Mark Miller)\r
+\r
+ * LUCENE-1852: Fix localization test failures. \r
+    (Robert Muir via Michael Busch)\r
+    \r
+ * LUCENE-1843: Refactored all tests that use assertAnalyzesTo() & others\r
+    in core and contrib to use a new BaseTokenStreamTestCase\r
+    base class. Also rewrote some tests to use these general analysis assert\r
+    functions instead of their own (e.g. TestMappingCharFilter).\r
+    The new base class also tests tokenization with the TokenStream.next()\r
+    backwards layer enabled (using Token/TokenWrapper as attribute\r
+    implementation) and disabled (default for Lucene 3.0)\r
+    (Uwe Schindler, Robert Muir)\r
+    \r
+ * LUCENE-1836: Added a new LocalizedTestCase as base class for localization\r
+    junit tests.  (Robert Muir, Uwe Schindler via Michael Busch)\r
+\r
+======================= Release 2.4.1 2009-03-09 =======================\r
+\r
+API Changes\r
+\r
+1. LUCENE-1186: Add Analyzer.close() to free internal ThreadLocal\r
+   resources.  (Christian Kohlschütter via Mike McCandless)\r
+\r
+Bug fixes\r
+\r
+1. LUCENE-1452: Fixed silent data-loss case whereby binary fields are\r
+   truncated to 0 bytes during merging if the segments being merged\r
+   are non-congruent (same field name maps to different field\r
+   numbers).  This bug was introduced with LUCENE-1219.  (Andrzej\r
+   Bialecki via Mike McCandless).\r
+\r
+2. LUCENE-1429: Don't throw incorrect IllegalStateException from\r
+   IndexWriter.close() if you've hit an OOM when autoCommit is true.\r
+   (Mike McCandless)\r
+\r
+3. LUCENE-1474: If IndexReader.flush() is called twice when there were\r
+   pending deletions, it could lead to later false AssertionError\r
+   during IndexReader.open.  (Mike McCandless)\r
+\r
+4. LUCENE-1430: Fix false AlreadyClosedException from IndexReader.open\r
+   (masking an actual IOException) that takes String or File path.\r
+   (Mike McCandless)\r
+\r
+5. LUCENE-1442: Multiple-valued NOT_ANALYZED fields can double-count\r
+   token offsets.  (Mike McCandless)\r
+\r
+6. LUCENE-1453: Ensure IndexReader.reopen()/clone() does not result in\r
+   incorrectly closing the shared FSDirectory. This bug would only\r
+   happen if you use IndexReader.open() with a File or String argument.\r
+   The returned readers are wrapped by a FilterIndexReader that\r
+   correctly handles closing of directory after reopen()/clone(). \r
+   (Mark Miller, Uwe Schindler, Mike McCandless)\r
+\r
+7. LUCENE-1457: Fix possible overflow bugs during binary\r
+   searches. (Mark Miller via Mike McCandless)\r
+\r
+8. LUCENE-1459: Fix CachingWrapperFilter to not throw exception if\r
+   both bits() and getDocIdSet() methods are called. (Matt Jones via\r
+   Mike McCandless)\r
+\r
+9. LUCENE-1519: Fix int overflow bug during segment merging.  (Deepak\r
+   via Mike McCandless)\r
+\r
+10. LUCENE-1521: Fix int overflow bug when flushing segment.\r
+    (Shon Vella via Mike McCandless).\r
+\r
+11. LUCENE-1544: Fix deadlock in IndexWriter.addIndexes(IndexReader[]).\r
+    (Mike McCandless via Doug Sale)\r
+\r
+12. LUCENE-1547: Fix rare thread safety issue if two threads call\r
+    IndexWriter commit() at the same time.  (Mike McCandless)\r
+\r
+13. LUCENE-1465: NearSpansOrdered returns payloads from first possible match \r
+    rather than the correct, shortest match; Payloads could be returned even\r
+    if the max slop was exceeded; The wrong payload could be returned in \r
+    certain situations. (Jonathan Mamou, Greg Shackles, Mark Miller)\r
+\r
+14. LUCENE-1186: Add Analyzer.close() to free internal ThreadLocal\r
+    resources.  (Christian Kohlschütter via Mike McCandless)\r
+\r
+15. LUCENE-1552: Fix IndexWriter.addIndexes(IndexReader[]) to properly\r
+    rollback IndexWriter's internal state on hitting an\r
+    exception. (Scott Garland via Mike McCandless)\r
+\r
+======================= Release 2.4.0 2008-10-06 =======================\r
+\r
+Changes in backwards compatibility policy\r
+\r
+1. LUCENE-1340: In a minor change to Lucene's backward compatibility\r
+   policy, we are now allowing the Fieldable interface to have\r
+   changes, within reason, and made on a case-by-case basis.  If an\r
+   application implements its own Fieldable, please be aware of\r
+   this.  Otherwise, no need to be concerned.  This is in effect for\r
+   all 2.X releases, starting with 2.4.  Also note, that in all\r
+   likelihood, Fieldable will be changed in 3.0.\r
+\r
+\r
+Changes in runtime behavior\r
+\r
+ 1. LUCENE-1151: Fix StandardAnalyzer to not mis-identify host names\r
+    (e.g. lucene.apache.org) as an ACRONYM.  To get back to the pre-2.4\r
+    backwards compatible, but buggy, behavior, you can either call\r
+    StandardAnalyzer.setDefaultReplaceInvalidAcronym(false) (static\r
+    method), or, set system property\r
+    org.apache.lucene.analysis.standard.StandardAnalyzer.replaceInvalidAcronym\r
+    to "false" on JVM startup.  All StandardAnalyzer instances created\r
+    after that will then show the pre-2.4 behavior.  Alternatively,\r
+    you can call setReplaceInvalidAcronym(false) to change the\r
+    behavior per instance of StandardAnalyzer.  This backwards\r
+    compatibility will be removed in 3.0 (hardwiring the value to\r
+    true).  (Mike McCandless)\r
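+\r
+    A minimal sketch of the per-instance opt-out described above:\r
+\r
+      StandardAnalyzer a = new StandardAnalyzer();\r
+      a.setReplaceInvalidAcronym(false);   // pre-2.4 (buggy) behavior\r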
+\r
+ 2. LUCENE-1044: IndexWriter with autoCommit=true now commits (such\r
+    that a reader can see the changes) far less often than it used to.\r
+    Previously, every flush was also a commit.  You can always force a\r
+    commit by calling IndexWriter.commit().  Furthermore, in 3.0,\r
+    autoCommit will be hardwired to false (IndexWriter constructors\r
+    that take an autoCommit argument have been deprecated) (Mike\r
+    McCandless)\r
+\r
+ 3. LUCENE-1335: IndexWriter.addIndexes(Directory[]) and\r
+    addIndexesNoOptimize no longer allow the same Directory instance\r
+    to be passed in more than once.  Internally, IndexWriter uses\r
+    Directory and segment name to uniquely identify segments, so\r
+    adding the same Directory more than once was causing duplicates\r
+    which led to problems (Mike McCandless)\r
+\r
+ 4. LUCENE-1396: Improve PhraseQuery.toString() so that gaps in the\r
+    positions are indicated with a ? and multiple terms at the same\r
+    position are joined with a |.  (Andrzej Bialecki via Mike\r
+    McCandless)\r
+\r
+API Changes\r
+\r
+ 1. LUCENE-1084: Changed all IndexWriter constructors to take an\r
+    explicit parameter for maximum field size.  Deprecated all the\r
+    pre-existing constructors; these will be removed in release 3.0.\r
+    NOTE: these new constructors set autoCommit to false.  (Steven\r
+    Rowe via Mike McCandless)\r
+\r
+ 2. LUCENE-584: Changed Filter API to return a DocIdSet instead of a\r
+    java.util.BitSet. This allows using more efficient data structures\r
+    for Filters and makes them more flexible. This deprecates\r
+    Filter.bits(), so all filters that implement this outside\r
+    the Lucene code base will need to be adapted. See also the javadocs\r
+    of the Filter class. (Paul Elschot, Michael Busch)\r
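+\r
+    A minimal sketch of a Filter under the new API (toy acceptance rule;\r
+    assumes only getDocIdSet needs overriding, and uses OpenBitSet, which\r
+    extends DocIdSet, see New features below):\r
+\r
+      public class EvenDocsFilter extends Filter {\r
+        public DocIdSet getDocIdSet(IndexReader reader) throws IOException {\r
+          OpenBitSet bits = new OpenBitSet(reader.maxDoc());\r
+          for (int i = 0; i < reader.maxDoc(); i += 2)\r
+            bits.set(i);             // accept even doc ids (toy example)\r
+          return bits;\r
+        }\r
+      }\r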
+\r
+ 3. LUCENE-1044: Added IndexWriter.commit() which flushes any buffered\r
+    adds/deletes and then commits a new segments file so readers will\r
+    see the changes.  Deprecate IndexWriter.flush() in favor of\r
+    IndexWriter.commit().  (Mike McCandless)\r
+\r
+ 4. LUCENE-325: Added IndexWriter.expungeDeletes methods, which\r
+    consult the MergePolicy to find merges necessary to merge away all\r
+    deletes from the index.  This should be a somewhat lower cost\r
+    operation than optimize.  (John Wang via Mike McCandless)\r
+\r
+ 5. LUCENE-1233: Return empty array instead of null when no fields\r
+    match the specified name in these methods in Document:\r
+    getFieldables, getFields, getValues, getBinaryValues.  (Stefan\r
+    Trcek via Mike McCandless)\r
+\r
+ 6. LUCENE-1234: Make BoostingSpanScorer protected.  (Andi Vajda via Grant Ingersoll)\r
+\r
+ 7. LUCENE-510: The index now stores strings as true UTF-8 bytes\r
+    (previously it was Java's modified UTF-8).  If any text, either\r
+    stored fields or a token, has illegal UTF-16 surrogate characters,\r
+    these characters are now silently replaced with the Unicode\r
+    replacement character U+FFFD.  This is a change to the index file\r
+    format.  (Marvin Humphrey via Mike McCandless)\r
+\r
+ 8. LUCENE-852: Let the SpellChecker caller specify IndexWriter mergeFactor\r
+    and RAM buffer size.  (Otis Gospodnetic)\r
+       \r
+ 9. LUCENE-1290: Deprecate org.apache.lucene.search.Hits, Hit and HitIterator\r
+    and remove all references to these classes from the core. Also update demos\r
+    and tutorials. (Michael Busch)\r
+\r
+10. LUCENE-1288: Add getVersion() and getGeneration() to IndexCommit.\r
+    getVersion() returns the same value that IndexReader.getVersion()\r
+    returns when the reader is opened on the same commit.  (Jason\r
+    Rutherglen via Mike McCandless)\r
+\r
+11. LUCENE-1311: Added IndexReader.listCommits(Directory) static\r
+    method to list all commits in a Directory, plus IndexReader.open\r
+    methods that accept an IndexCommit and open the index as of that\r
+    commit.  These methods are only useful if you implement a custom\r
+    DeletionPolicy that keeps more than the last commit around.\r
+    (Jason Rutherglen via Mike McCandless)\r
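+\r
+    A minimal sketch (assumes a custom DeletionPolicy kept the commit, and\r
+    that listCommits returns a Collection of IndexCommit):\r
+\r
+      Collection commits = IndexReader.listCommits(dir);\r
+      IndexCommit past = (IndexCommit) commits.iterator().next();\r
+      IndexReader asOf = IndexReader.open(past);   // index as of that commit\r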
+\r
+12. LUCENE-1325: Added IndexCommit.isOptimized().  (Shalin Shekhar\r
+    Mangar via Mike McCandless)\r
+\r
+13. LUCENE-1324: Added TokenFilter.reset(). (Shai Erera via Mike\r
+    McCandless)\r
+\r
+14. LUCENE-1340: Added Fieldable.omitTf() method to skip indexing term\r
+    frequency, positions and payloads.  This saves index space, and\r
+    indexing/searching time.  (Eks Dev via Mike McCandless)\r
+\r
+15. LUCENE-1219: Add basic reuse API to Fieldable for binary fields:\r
+    getBinaryValue/Offset/Length(); currently only lazy fields reuse\r
+    the provided byte[] result to getBinaryValue.  (Eks Dev via Mike\r
+    McCandless)\r
+\r
+16. LUCENE-1334: Add new constructor for Term: Term(String fieldName)\r
+    which defaults term text to "".  (DM Smith via Mike McCandless)\r
+\r
+17. LUCENE-1333: Added Token.reinit(*) APIs to re-initialize (reuse) a\r
+    Token.  Also added term() method to return a String, with a\r
+    performance penalty clearly documented.  Also implemented\r
+    hashCode() and equals() in Token, and fixed all core and contrib\r
+    analyzers to use the re-use APIs.  (DM Smith via Mike McCandless)\r
+\r
+18. LUCENE-1329: Add optional readOnly boolean when opening an\r
+    IndexReader.  A readOnly reader is not allowed to make changes\r
+    (deletions, norms) to the index; in exchange, the isDeleted\r
+    method, often a bottleneck when searching with many threads, is\r
+    not synchronized.  The default for readOnly is still false, but in\r
+    3.0 the default will become true.  (Jason Rutherglen via Mike\r
+    McCandless)\r
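+\r
+    A minimal sketch (assuming the readOnly flag is the trailing boolean):\r
+\r
+      // read-only: no deletes/norms changes; isDeleted is unsynchronized\r
+      IndexReader reader = IndexReader.open(dir, true);\r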
+\r
+19. LUCENE-1367: Add IndexCommit.isDeleted().  (Shalin Shekhar Mangar\r
+    via Mike McCandless)\r
+\r
+20. LUCENE-1061: Factored out all "new XXXQuery(...)" in\r
+    QueryParser.java into protected methods newXXXQuery(...) so that\r
+    subclasses can create their own subclasses of each Query type.\r
+    (John Wang via Mike McCandless)\r
+\r
+21. LUCENE-753: Added new Directory implementation\r
+    org.apache.lucene.store.NIOFSDirectory, which uses java.nio's\r
+    FileChannel to do file reads.  On most non-Windows platforms, with\r
+    many threads sharing a single searcher, this may yield sizable\r
+    improvement to query throughput when compared to FSDirectory,\r
+    which only allows a single thread to read from an open file at a\r
+    time.  (Jason Rutherglen via Mike McCandless)\r
+\r
+22. LUCENE-1371: Added convenience method TopDocs Searcher.search(Query query, int n).\r
+    (Mike McCandless)\r
+    \r
+23. LUCENE-1356: Allow easy extensions of TopDocCollector by turning\r
+    constructor and fields from package to protected. (Shai Erera\r
+    via Doron Cohen) \r
+\r
+24. LUCENE-1375: Added convenience method IndexCommit.getTimestamp,\r
+    which is equivalent to\r
+    getDirectory().fileModified(getSegmentsFileName()).  (Mike McCandless)\r
+\r
+25. LUCENE-1366: Rename Field.Index options to be more accurate:\r
+    TOKENIZED becomes ANALYZED;  UN_TOKENIZED becomes NOT_ANALYZED;\r
+    NO_NORMS becomes NOT_ANALYZED_NO_NORMS and a new ANALYZED_NO_NORMS\r
+    is added.  (Mike McCandless)\r
+\r
+26. LUCENE-1131: Added numDeletedDocs method to IndexReader (Otis Gospodnetic)\r
+\r
+Bug fixes\r
+    \r
+ 1. LUCENE-1134: Fixed BooleanQuery.rewrite to only optimize a single \r
+    clause query if minNumShouldMatch<=0. (Shai Erera via Michael Busch)\r
+\r
+ 2. LUCENE-1169: Fixed bug in IndexSearcher.search(): searching with\r
+    a filter might miss some hits because scorer.skipTo() is called\r
+    without checking if the scorer is already at the right position.\r
+    scorer.skipTo(scorer.doc()) is not a NOOP, it behaves as \r
+    scorer.next(). (Eks Dev, Michael Busch)\r
+\r
+ 3. LUCENE-1182: Added scorePayload to SimilarityDelegator (Andi Vajda via Grant Ingersoll)\r
\r
+ 4. LUCENE-1213: MultiFieldQueryParser was ignoring slop in case\r
+    of a single field phrase. (Trejkaz via Doron Cohen)\r
+\r
+ 5. LUCENE-1228: IndexWriter.commit() was not updating the index version and as\r
+    result IndexReader.reopen() failed to sense index changes. (Doron Cohen)\r
+\r
+ 6. LUCENE-1267: Added numDocs() and maxDoc() to IndexWriter;\r
+    deprecated docCount().  (Mike McCandless)\r
+\r
+ 7. LUCENE-1274: Added new prepareCommit() method to IndexWriter,\r
+    which does phase 1 of a 2-phase commit (commit() does phase 2).\r
+    This is needed when you want to update an index as part of a\r
+    transaction involving external resources (eg a database).  Also\r
+    deprecated abort(), renaming it to rollback().  (Mike McCandless)\r
+\r
+ 8. LUCENE-1003: Stop RussianAnalyzer from removing numbers.\r
+    (TUSUR OpenTeam, Dmitry Lihachev via Otis Gospodnetic)\r
+\r
+ 9. LUCENE-1152: SpellChecker fix around clearIndex and indexDictionary\r
+    methods, plus removal of IndexReader reference.\r
+    (Naveen Belkale via Otis Gospodnetic)\r
+\r
+10. LUCENE-1046: Removed dead code in SpellChecker\r
+    (Daniel Naber via Otis Gospodnetic)\r
+       \r
+11. LUCENE-1189: Fixed the QueryParser to handle escaped characters within \r
+    quoted terms correctly. (Tomer Gabel via Michael Busch)\r
+\r
+12. LUCENE-1299: Fixed NPE in SpellChecker when IndexReader is not null and field is null. (Grant Ingersoll)\r
+\r
+13. LUCENE-1303: Fixed BoostingTermQuery's explanation to be marked as a Match \r
+    depending only upon the non-payload score part, regardless of the effect of \r
+    the payload on the score. Prior to this, score of a query containing a BTQ \r
+    differed from its explanation. (Doron Cohen)\r
+    \r
+14. LUCENE-1310: Fixed SloppyPhraseScorer to work also for terms repeating more \r
+    than twice in the query. (Doron Cohen)\r
+\r
+15. LUCENE-1351: ISOLatin1AccentFilter now cleans additional ligatures (Cedrik Lime via Grant Ingersoll)\r
+\r
+16. LUCENE-1383: Workaround a nasty "leak" in Java's builtin\r
+    ThreadLocal, to prevent Lucene from causing unexpected\r
+    OutOfMemoryError in certain situations (notably J2EE\r
+    applications).  (Chris Lu via Mike McCandless)\r
+\r
+New features\r
+\r
+ 1. LUCENE-1137: Added Token.set/getFlags() accessors for passing more information about a Token through the analysis\r
+    process.  The flag is not indexed/stored and is thus only used by analysis.\r
+\r
+ 2. LUCENE-1147: Add -segment option to CheckIndex tool so you can\r
+    check only a specific segment or segments in your index.  (Mike\r
+    McCandless)\r
+\r
+ 3. LUCENE-1045: Reopened this issue to add support for short and bytes. \r
\r
+ 4. LUCENE-584: Added new data structures to o.a.l.util, such as \r
+    OpenBitSet and SortedVIntList. These extend DocIdSet and can \r
+    directly be used for Filters with the new Filter API. Also changed\r
+    the core Filters to use OpenBitSet instead of java.util.BitSet.\r
+    (Paul Elschot, Michael Busch)\r
+\r
+ 5. LUCENE-494: Added QueryAutoStopWordAnalyzer to allow for the automatic removal, from a query, of frequently occurring terms.\r
+    This Analyzer is not intended for use during indexing. (Mark Harwood via Grant Ingersoll)\r
+\r
+ 6. LUCENE-1044: Change Lucene to properly "sync" files after\r
+    committing, to ensure on a machine or OS crash or power cut, even\r
+    with cached writes, the index remains consistent.  Also added\r
+    explicit commit() method to IndexWriter to force a commit without\r
+    having to close.  (Mike McCandless)\r
+    \r
+ 7. LUCENE-997: Add search timeout (partial) support.\r
+    A TimeLimitedCollector was added to allow limiting search time.\r
+    It is a partial solution since timeout is checked only when \r
+    collecting a hit, and therefore a search for rare words in a \r
+    huge index might not stop within the specified time.\r
+    (Sean Timm via Doron Cohen) \r
+\r
+ 8. LUCENE-1184: Allow SnapshotDeletionPolicy to be re-used across\r
+    close/re-open of IndexWriter while still protecting an open\r
+    snapshot (Tim Brennan via Mike McCandless)\r
+\r
+ 9. LUCENE-1194: Added IndexWriter.deleteDocuments(Query) to delete\r
+    documents matching the specified query.  Also added static unlock\r
+    and isLocked methods (deprecating the ones in IndexReader).  (Mike\r
+    McCandless)\r
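+\r
+    A minimal sketch (field and term are illustrative):\r
+\r
+      writer.deleteDocuments(new TermQuery(new Term("category", "stale")));\r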
+\r
+10. LUCENE-1201: Add IndexReader.getIndexCommit() method. (Tim Brennan\r
+    via Mike McCandless)\r
+\r
+11. LUCENE-550:  Added InstantiatedIndex implementation.  Experimental \r
+    Index store similar to MemoryIndex but allows for multiple documents \r
+    in memory.  (Karl Wettin via Grant Ingersoll)\r
+\r
+12. LUCENE-400: Added word based n-gram filter (in contrib/analyzers) called ShingleFilter and an Analyzer wrapper\r
+    that wraps another Analyzer's token stream with a ShingleFilter (Sebastian Kirsch, Steve Rowe via Grant Ingersoll) \r
+\r
+13. LUCENE-1166: Decomposition tokenfilter for languages like German and Swedish (Thomas Peuss via Grant Ingersoll)\r
+\r
+14. LUCENE-1187: ChainedFilter and BooleanFilter now work with new Filter API\r
+    and DocIdSetIterator-based filters. Backwards-compatibility with old \r
+    BitSet-based filters is ensured. (Paul Elschot via Michael Busch)\r
+\r
+15. LUCENE-1295: Added new method to MoreLikeThis for retrieving interesting terms and made retrieveTerms(int) public. (Grant Ingersoll)\r
+\r
+16. LUCENE-1298: MoreLikeThis can now accept a custom Similarity (Grant Ingersoll)\r
+\r
+17. LUCENE-1297: Allow other string distance measures for the SpellChecker\r
+    (Thomas Morton via Otis Gospodnetic)\r
+\r
+18. LUCENE-1001: Provide access to Payloads via Spans.  All existing Span Query implementations in Lucene implement it. (Mark Miller, Grant Ingersoll)\r
+\r
+19. LUCENE-1354: Provide programmatic access to CheckIndex (Grant Ingersoll, Mike McCandless)\r
+\r
+20. LUCENE-1279: Add support for Collators to RangeFilter/Query and Query Parser.  (Steve Rowe via Grant Ingersoll) \r
+\r
+Optimizations\r
+\r
+ 1. LUCENE-705: When building a compound file, use\r
+    RandomAccessFile.setLength() to tell the OS/filesystem to\r
+    pre-allocate space for the file.  This may improve fragmentation\r
+    in how the CFS file is stored, and allows us to detect an upcoming\r
+    disk full situation before actually filling up the disk.  (Mike\r
+    McCandless)\r
+\r
+ 2. LUCENE-1120: Speed up merging of term vectors by bulk-copying the\r
+    raw bytes for each contiguous range of non-deleted documents.\r
+    (Mike McCandless)\r
+       \r
+ 3. LUCENE-1185: Avoid checking if the TermBuffer 'scratch' in \r
+    SegmentTermEnum is null for every call of scanTo().\r
+    (Christian Kohlschuetter via Michael Busch)\r
+\r
+ 4. LUCENE-1217: Internal to Field.java, use isBinary instead of\r
+    runtime type checking for possible speedup of binaryValue().\r
+    (Eks Dev via Mike McCandless)\r
+\r
+ 5. LUCENE-1183: Optimized TRStringDistance class (in contrib/spell) that uses\r
+    less memory than the previous version.  (Cédrik LIME via Otis Gospodnetic)\r
+\r
+ 6. LUCENE-1195: Improve term lookup performance by adding a LRU cache to the\r
+    TermInfosReader. In performance experiments the speedup was about 25% on \r
+    average on mid-size indexes with ~500,000 documents for queries with 3 \r
+    terms and about 7% on larger indexes with ~4.3M documents. (Michael Busch)\r
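+\r
+A minimal sketch of the pre-allocation idea from item 1 above, using\r
+only java.io.RandomAccessFile; the file name and totalBytes are\r
+illustrative, not Lucene's internal code:\r
+\r
+    RandomAccessFile file = new RandomAccessFile("example.cfs", "rw");\r
+    try {\r
+        // reserving the final size up front lets the filesystem\r
+        // allocate contiguous space and fails early on a full disk\r
+        file.setLength(totalBytes);\r
+        // ... write the compound file contents ...\r
+    } finally {\r
+        file.close();\r
+    }\r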
+\r
+Documentation\r
+\r
+  1. LUCENE-1236:  Added some clarifying remarks to EdgeNGram*.java (Hiroaki Kawai via Grant Ingersoll)\r
+  \r
+  2. LUCENE-1157 and LUCENE-1256: HTML changes log, created automatically\r
+     from CHANGES.txt. This HTML file is currently visible only via the\r
+     developers page.  (Steven Rowe via Doron Cohen)\r
+\r
+  3. LUCENE-1349: Fieldable can now be changed without breaking backward\r
+     compatibility rules (within reason; see the note at the top of this\r
+     file and also on Fieldable.java).  (Grant Ingersoll)\r
+  \r
+  4. LUCENE-1873: Update documentation to reflect current Contrib area status.\r
+     (Steven Rowe, Mark Miller)\r
+\r
+Build\r
+\r
+  1. LUCENE-1153: Added JUnit JAR to new lib directory.  Updated build to rely on local JUnit instead of ANT/lib.\r
+  \r
+  2. LUCENE-1202: Small fixes to the way Clover is used to work better\r
+     with contribs.  Of particular note: a single clover db is used\r
+     regardless of whether tests are run globally or in the specific\r
+     contrib directories. \r
+     \r
+  3. LUCENE-1353: Javacc target in contrib/miscellaneous for \r
+     generating the precedence query parser. \r
+\r
+Test Cases\r
+\r
+ 1. LUCENE-1238: Fixed intermittent failures of TestTimeLimitedCollector.testTimeoutMultiThreaded.\r
+    As part of this fix, a "greedy" flag was added to TimeLimitedCollector\r
+    to allow the wrapped collector to also collect the last doc after the\r
+    allowed time has passed. (Doron Cohen)\r
+\r
+ 2. LUCENE-1348: Relaxed TestTimeLimitedCollector to not fail when the\r
+    timeout is exceeded (e.g. because the test machine is very busy).\r
+       \r
+======================= Release 2.3.2 2008-05-05 =======================\r
+\r
+Bug fixes\r
+\r
+ 1. LUCENE-1191: On hitting OutOfMemoryError in any index-modifying\r
+    methods in IndexWriter, do not commit any further changes to the\r
+    index to prevent risk of possible corruption.  (Mike McCandless)\r
+\r
+ 2. LUCENE-1197: Fixed issue whereby IndexWriter would flush by RAM\r
+    too early when TermVectors were in use.  (Mike McCandless)\r
+\r
+ 3. LUCENE-1198: Don't corrupt index if an exception happens inside\r
+    DocumentsWriter.init (Mike McCandless)\r
+\r
+ 4. LUCENE-1199: Added defensive check for null indexReader before\r
+    calling close in IndexModifier.close() (Mike McCandless)\r
+\r
+ 5. LUCENE-1200: Fix rare deadlock case in addIndexes* when\r
+    ConcurrentMergeScheduler is in use (Mike McCandless)\r
+\r
+ 6. LUCENE-1208: Fix deadlock case on hitting an exception while\r
+    processing a document that had triggered a flush (Mike McCandless)\r
+\r
+ 7. LUCENE-1210: Fix deadlock case on hitting an exception while\r
+    starting a merge when using ConcurrentMergeScheduler (Mike McCandless)\r
+\r
+ 8. LUCENE-1222: Fix IndexWriter.doAfterFlush to always be called on\r
+    flush (Mark Ferguson via Mike McCandless)\r
+       \r
+ 9. LUCENE-1226: Fixed IndexWriter.addIndexes(IndexReader[]) to commit\r
+    successfully created compound files. (Michael Busch)\r
+\r
+10. LUCENE-1150: Re-expose StandardTokenizer's constants publicly;\r
+    this was accidentally lost with LUCENE-966.  (Nicolas Lalevée via\r
+    Mike McCandless)\r
+\r
+11. LUCENE-1262: Fixed bug in BufferedIndexInput.refill whereby on\r
+    hitting an exception in readInternal, the buffer is incorrectly\r
+    filled with stale bytes such that subsequent calls to readByte()\r
+    return incorrect results.  (Trejkaz via Mike McCandless)\r
+\r
+12. LUCENE-1270: Fixed intermittent case where IndexWriter.close()\r
+    would hang after IndexWriter.addIndexesNoOptimize had been\r
+    called.  (Stu Hood via Mike McCandless)\r
+       \r
+Build\r
+\r
+ 1. LUCENE-1230: Include *pom.xml* in source release files. (Michael Busch)\r
+\r
\r
+======================= Release 2.3.1 2008-02-22 =======================\r
+\r
+Bug fixes\r
+    \r
+ 1. LUCENE-1168: Fixed corruption cases when autoCommit=false and\r
+    documents have mixed term vectors (Suresh Guvvala via Mike\r
+    McCandless).\r
+\r
+ 2. LUCENE-1171: Fixed some cases where OOM errors could cause\r
+    deadlock in IndexWriter (Mike McCandless).\r
+\r
+ 3. LUCENE-1173: Fixed corruption case when autoCommit=false and bulk\r
+    merging of stored fields is used (Yonik via Mike McCandless).\r
+\r
+ 4. LUCENE-1163: Fixed bug in CharArraySet.contains(char[] buffer, int\r
+    offset, int len) that was ignoring offset and thus giving the\r
+    wrong answer.  (Thomas Peuss via Mike McCandless)\r
+       \r
+ 5. LUCENE-1177: Fix rare case where IndexWriter.optimize might do too\r
+    many merges at the end.  (Mike McCandless)\r
+       \r
+ 6. LUCENE-1176: Fix corruption case when documents with no term\r
+    vector fields are added before documents with term vector fields.\r
+    (Mike McCandless)\r
+       \r
+ 7. LUCENE-1179: Fixed assert statement that was incorrectly\r
+    preventing Fields with empty-string field name from working.\r
+    (Sergey Kabashnyuk via Mike McCandless)\r
+\r
+======================= Release 2.3.0 2008-01-21 =======================\r
+\r
+Changes in runtime behavior\r
+\r
+ 1. LUCENE-994: Defaults for IndexWriter have been changed to maximize\r
+    out-of-the-box indexing speed.  First, IndexWriter now flushes by\r
+    RAM usage (16 MB by default) instead of a fixed doc count (call\r
+    IndexWriter.setMaxBufferedDocs to get backwards compatible\r
+    behavior).  Second, ConcurrentMergeScheduler is used to run merges\r
+    using background threads (call IndexWriter.setMergeScheduler(new\r
+    SerialMergeScheduler()) to get backwards compatible behavior).\r
+    Third, merges are chosen based on size in bytes of each segment\r
+    rather than document count of each segment (call\r
+    IndexWriter.setMergePolicy(new LogDocMergePolicy()) to get\r
+    backwards compatible behavior).\r
+\r
+    NOTE: users of ParallelReader must change all of these defaults\r
+    back in order to ensure the docIDs "align" across all parallel\r
+    indices.  (A sketch of restoring the old defaults follows this\r
+    list.)\r
+\r
+    (Mike McCandless)\r
+\r
+ 2. LUCENE-1045: SortField.AUTO didn't work with long. When detecting\r
+    the field type for sorting automatically, numbers used to be\r
+    interpreted as int, then as float, if parsing the number as an int\r
+    failed. Now the detection checks for int, then for long,\r
+    then for float. (Daniel Naber)\r
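+\r
+A sketch of restoring the pre-2.3 defaults described in item 1 above,\r
+using the setters named there (dir and analyzer are assumed to be an\r
+open Directory and an Analyzer):\r
+\r
+    IndexWriter writer = new IndexWriter(dir, analyzer);\r
+    // flush by document count instead of by RAM usage\r
+    writer.setMaxBufferedDocs(10);\r
+    // run merges serially instead of in background threads\r
+    writer.setMergeScheduler(new SerialMergeScheduler());\r
+    // choose merges by document count instead of size in bytes\r
+    writer.setMergePolicy(new LogDocMergePolicy());\r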
+\r
+API Changes\r
+\r
+ 1. LUCENE-843: Added IndexWriter.setRAMBufferSizeMB(...) to have\r
+    IndexWriter flush whenever the buffered documents are using more\r
+    than the specified amount of RAM.  Also added new APIs to Token\r
+    that allow one to set a char[] plus offset and length to specify a\r
+    token (to avoid creating a new String() for each Token).  A sketch\r
+    of the new IndexWriter calls follows this list.  (Mike McCandless)\r
+\r
+ 2. LUCENE-963: Add setters to Field to allow for re-using a single\r
+    Field instance during indexing.  This is a sizable performance\r
+    gain, especially for small documents.  (Mike McCandless)\r
+\r
+ 3. LUCENE-969: Add new APIs to Token, TokenStream and Analyzer to\r
+    permit re-using of Token and TokenStream instances during\r
+    indexing.  Changed Token to use a char[] as the store for the\r
+    termText instead of String.  This gives faster tokenization\r
+    performance (~10-15%).  (Mike McCandless)\r
+\r
+ 4. LUCENE-847: Factored MergePolicy, which determines which merges\r
+    should take place and when, as well as MergeScheduler, which\r
+    determines when the selected merges should actually run, out of\r
+    IndexWriter.  The default merge policy is now\r
+    LogByteSizeMergePolicy (see LUCENE-845) and the default merge\r
+    scheduler is now ConcurrentMergeScheduler (see\r
+    LUCENE-870). (Steven Parkes via Mike McCandless)\r
+\r
+ 5. LUCENE-1052: Add IndexReader.setTermInfosIndexDivisor(int) method\r
+    that allows you to reduce memory usage of the termInfos by further\r
+    sub-sampling (over the termIndexInterval that was used during\r
+    indexing) which terms are loaded into memory.  (Chuck Williams,\r
+    Doug Cutting via Mike McCandless)\r
+    \r
+ 6. LUCENE-743: Add IndexReader.reopen() method that re-opens an\r
+    existing IndexReader (see New features -> 8.) (Michael Busch)\r
+\r
+ 7. LUCENE-1062: Add setData(byte[] data), \r
+    setData(byte[] data, int offset, int length), getData(), getOffset()\r
+    and clone() methods to o.a.l.index.Payload. Also add the field name \r
+    as arg to Similarity.scorePayload(). (Michael Busch)\r
+\r
+ 8. LUCENE-982: Add IndexWriter.optimize(int maxNumSegments) method to\r
+    "partially optimize" an index down to maxNumSegments segments.\r
+    (Mike McCandless)\r
+\r
+ 9. LUCENE-1080: Changed Token.DEFAULT_TYPE to be public.\r
+\r
+10. LUCENE-1064: Changed TopDocs constructor to be public. \r
+     (Shai Erera via Michael Busch)\r
+\r
+11. LUCENE-1079: DocValues cleanup: constructor now has no params,\r
+    and getInnerArray() now throws UnsupportedOperationException (Doron Cohen)\r
+\r
+12. LUCENE-1089: Added PriorityQueue.insertWithOverflow, which returns\r
+    the Object (if any) that was bumped from the queue to allow\r
+    re-use.  (Shai Erera via Mike McCandless)\r
+    \r
+13. LUCENE-1101: Token reuse 'contract' (defined in LUCENE-969)\r
+    modified so it is the token producer's responsibility\r
+    to call Token.clear(). (Doron Cohen)\r
+\r
+14. LUCENE-1118: Changed StandardAnalyzer to skip too-long (default >\r
+    255 characters) tokens.  You can increase this limit by calling\r
+    StandardAnalyzer.setMaxTokenLength(...).  (Michael McCandless)\r
+\r
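+A minimal sketch of the new IndexWriter calls from items 1 and 8 above\r
+(writer is assumed to be an open IndexWriter):\r
+\r
+    // flush whenever buffered documents use more than 32 MB of RAM\r
+    writer.setRAMBufferSizeMB(32.0);\r
+    // ... add documents ...\r
+    // "partially optimize" the index down to at most 4 segments\r
+    writer.optimize(4);\r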
+\r
+Bug fixes\r
+\r
+ 1. LUCENE-933: QueryParser fixed to not produce empty sub \r
+    BooleanQueries "()" even if the Analyzer produced no \r
+    tokens for input. (Doron Cohen)\r
+\r
+ 2. LUCENE-955: Fixed SegmentTermPositions to work correctly with the\r
+    first term in the dictionary. (Michael Busch)\r
+\r
+ 3. LUCENE-951: Fixed NullPointerException in MultiLevelSkipListReader\r
+    that was thrown after a call of TermPositions.seek(). \r
+    (Rich Johnson via Michael Busch)\r
+    \r
+ 4. LUCENE-938: Fixed cases where an unhandled exception in\r
+    IndexWriter's methods could cause deletes to be lost.\r
+    (Steven Parkes via Mike McCandless)\r
+      \r
+ 5. LUCENE-962: Fixed case where an unhandled exception in\r
+    IndexWriter.addDocument or IndexWriter.updateDocument could cause\r
+    unreferenced files in the index to not be deleted\r
+    (Steven Parkes via Mike McCandless)\r
+  \r
+ 6. LUCENE-957: RAMDirectory fixed to properly handle directories\r
+    larger than Integer.MAX_VALUE. (Doron Cohen)\r
+\r
+ 7. LUCENE-781: MultiReader fixed to not throw NPE if isCurrent(),\r
+    isOptimized() or getVersion() is called. Separated MultiReader\r
+    into two classes: MultiSegmentReader extends IndexReader, is\r
+    package-protected and is created automatically by IndexReader.open()\r
+    in case the index has multiple segments. The public MultiReader \r
+    now extends MultiSegmentReader and is intended to be used by users\r
+    who want to add their own subreaders. (Daniel Naber, Michael Busch)\r
+\r
+ 8. LUCENE-970: FilterIndexReader now implements isOptimized(). Before\r
+    a call of isOptimized() would throw a NPE. (Michael Busch)\r
+\r
+ 9. LUCENE-832: ParallelReader fixed to not throw NPE if isCurrent(),\r
+    isOptimized() or getVersion() is called. (Michael Busch)\r
+      \r
+10. LUCENE-948: Fix FileNotFoundException caused by stale NFS client\r
+    directory listing caches when writers on different machines are\r
+    sharing an index over NFS and using a custom deletion policy (Mike\r
+    McCandless)\r
+\r
+11. LUCENE-978: Ensure TermInfosReader, FieldsReader, and FieldsReader\r
+    close any streams they had opened if an exception is hit in the\r
+    constructor.  (Ning Li via Mike McCandless)\r
+\r
+12. LUCENE-985: If an extremely long term is in a doc (> 16383 chars),\r
+    we now throw an IllegalArgumentException saying the term is too\r
+    long, instead of cryptic ArrayIndexOutOfBoundsException.  (Karl\r
+    Wettin via Mike McCandless)\r
+\r
+13. LUCENE-991: The explain() method of BoostingTermQuery had errors\r
+    when no payloads were present on a document.  (Peter Keegan via\r
+    Grant Ingersoll)\r
+\r
+14. LUCENE-992: Fixed IndexWriter.updateDocument to be atomic again\r
+    (this was broken by LUCENE-843).  (Ning Li via Mike McCandless)\r
+\r
+15. LUCENE-1008: Fixed corruption case when document with no term\r
+    vector fields is added after documents with term vector fields.\r
+    This bug was introduced with LUCENE-843.  (Grant Ingersoll via\r
+    Mike McCandless)\r
+\r
+16. LUCENE-1006: Fixed QueryParser to accept a "" field value (zero\r
+    length quoted string.)  (yonik)\r
+\r
+17. LUCENE-1010: Fixed corruption case when document with no term\r
+    vector fields is added after documents with term vector fields.\r
+    This case is hit during merge and would cause an EOFException.\r
+    This bug was introduced with LUCENE-984.  (Andi Vajda via Mike\r
+    McCandless)\r
+\r
+19. LUCENE-1009: Fix merge slowdown with LogByteSizeMergePolicy when\r
+    autoCommit=false and documents are using stored fields and/or term\r
+    vectors.  (Mark Miller via Mike McCandless)\r
+\r
+20. LUCENE-1011: Fixed corruption case when two or more machines,\r
+    sharing an index over NFS, can be writers in quick succession.\r
+    (Patrick Kimber via Mike McCandless)\r
+\r
+21. LUCENE-1028: Fixed Weight serialization for few queries:\r
+    DisjunctionMaxQuery, ValueSourceQuery, CustomScoreQuery.\r
+    Serialization check added for all queries.\r
+    (Kyle Maxwell via Doron Cohen)\r
+\r
+22. LUCENE-1048: Fixed incorrect behavior in Lock.obtain(...) when the\r
+    timeout argument is very large (eg Long.MAX_VALUE).  Also added\r
+    Lock.LOCK_OBTAIN_WAIT_FOREVER constant to never timeout.  (Nikolay\r
+    Diakov via Mike McCandless)\r
+\r
+23. LUCENE-1050: Throw LockReleaseFailedException in\r
+    Simple/NativeFSLockFactory if we fail to delete the lock file when\r
+    releasing the lock.  (Nikolay Diakov via Mike McCandless)\r
+\r
+24. LUCENE-1071: Fixed SegmentMerger to correctly set payload bit in \r
+    the merged segment. (Michael Busch)\r
+\r
+25. LUCENE-1042: Remove throwing of IOException in getTermFreqVector(int, String, TermVectorMapper) to be consistent\r
+    with other getTermFreqVector calls.  Also removed the throwing of the other IOException in that method to be consistent.  (Karl Wettin via Grant Ingersoll)\r
+    \r
+26. LUCENE-1096: Fixed Hits behavior when hits' docs are deleted\r
+    while iterating over the hits. Deleting docs already retrieved\r
+    now works seamlessly. If docs not yet retrieved are deleted\r
+    (e.g. from another thread), and an application then, relying on\r
+    the initial Hits.length(), attempts to retrieve more hits than\r
+    actually exist, a ConcurrentModificationException is thrown.\r
+    (Doron Cohen)\r
+\r
+27. LUCENE-1068: Changed StandardTokenizer to fix an issue where it\r
+    marked the type of some tokens incorrectly.  This is done by adding\r
+    a new flag named replaceInvalidAcronym, which defaults to false (the\r
+    current, incorrect behavior).  Setting this flag to true fixes the\r
+    problem.  This flag is a temporary fix and is already marked as\r
+    deprecated; 3.x will implement the correct approach.  (Shai Erera\r
+    via Grant Ingersoll)\r
+    LUCENE-1140: Fixed NPE caused by LUCENE-1068 (Alexei Dets via Grant\r
+    Ingersoll)\r
+    \r
+28. LUCENE-749: ChainedFilter behavior fixed when logic of \r
+    first filter is ANDNOT.  (Antonio Bruno via Doron Cohen)\r
+\r
+29. LUCENE-508: Make sure SegmentTermEnum.prev() is accurate (= last\r
+    term) after next() returns false.  (Steven Tamm via Mike\r
+    McCandless)\r
+\r
+    \r
+New features\r
+\r
+ 1. LUCENE-906: Elision filter for French.\r
+    (Mathieu Lecarme via Otis Gospodnetic)\r
+\r
+ 2. LUCENE-960: Added a SpanQueryFilter and related classes to allow for\r
+    not only filtering, but knowing where in a Document a Filter matches\r
+    (Grant Ingersoll)\r
+\r
+ 3. LUCENE-868: Added new Term Vector access features.  New callback\r
+    mechanism allows application to define how and where to read Term\r
+    Vectors from disk. This implementation contains several extensions\r
+    of the new abstract TermVectorMapper class.  The new API should be\r
+    back-compatible.  No changes in the actual storage of Term Vectors\r
+    have taken place.\r
+ 3.1 LUCENE-1038: Added setDocumentNumber() method to TermVectorMapper\r
+     to provide information about what document is being accessed.\r
+     (Karl Wettin via Grant Ingersoll)\r
+\r
+ 4. LUCENE-975: Added PositionBasedTermVectorMapper that allows for\r
+    position based lookup of term vector information.\r
+    See item #3 above (LUCENE-868).\r
+\r
+ 5. LUCENE-1011: Added simple tools (all in org.apache.lucene.store)\r
+    to verify that locking is working properly.  LockVerifyServer runs\r
+    a separate server to verify locks.  LockStressTest runs a simple\r
+    tool that rapidly obtains and releases locks.\r
+    VerifyingLockFactory is a LockFactory that wraps any other\r
+    LockFactory and consults the LockVerifyServer whenever a lock is\r
+    obtained or released, throwing an exception if an illegal lock\r
+    obtain occurred.  (Patrick Kimber via Mike McCandless)\r
+\r
+ 6. LUCENE-1015: Added FieldCache extension (ExtendedFieldCache) to\r
+    support doubles and longs.  Added support into SortField for sorting\r
+    on doubles and longs as well.  (Grant Ingersoll)\r
+\r
+ 7. LUCENE-1020: Created basic index checking & repair tool\r
+    (o.a.l.index.CheckIndex).  When run without -fix it does a\r
+    detailed test of all segments in the index and reports summary\r
+    information and any errors it hit.  With -fix it will remove\r
+    segments that had errors.  (Mike McCandless)\r
+\r
+ 8. LUCENE-743: Add IndexReader.reopen() method that re-opens an\r
+    existing IndexReader by only loading those portions of an index\r
+    that have changed since the reader was (re)opened. reopen() can\r
+    be significantly faster than open(), depending on the amount of\r
+    index changes. SegmentReader, MultiSegmentReader, MultiReader,\r
+    and ParallelReader implement reopen(). A usage sketch follows\r
+    this list. (Michael Busch)\r
+\r
+ 9. LUCENE-1040: CharArraySet useful for efficiently checking\r
+    set membership of text specified by char[]. (yonik)\r
+\r
+10. LUCENE-1073: Created SnapshotDeletionPolicy to facilitate taking a\r
+    live backup of an index without pausing indexing.  (Mike\r
+    McCandless)\r
+    \r
+11. LUCENE-1019: CustomScoreQuery enhanced to support multiple \r
+    ValueSource queries. (Kyle Maxwell via Doron Cohen)\r
+    \r
+12. LUCENE-1095: Added an option to StopFilter to increase \r
+    positionIncrement of the token succeeding a stopped token.\r
+    Disabled by default. Similar option added to QueryParser \r
+    to consider token positions when creating PhraseQuery \r
+    and MultiPhraseQuery. Disabled by default (so by default\r
+    the query parser ignores position increments).\r
+    (Doron Cohen)\r
+\r
+13. LUCENE-1380: Added TokenFilter for setting position increment in special cases related to the ShingleFilter (Mck SembWever, Steve Rowe, Karl Wettin via Grant Ingersoll)\r
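+\r
+A minimal sketch of the reopen() pattern from item 8 above; when\r
+reopen() returns a new instance, the old reader is released by the\r
+caller:\r
+\r
+    IndexReader newReader = reader.reopen();\r
+    if (newReader != reader) {\r
+        // a refreshed reader was returned; release the old one\r
+        reader.close();\r
+        reader = newReader;\r
+    }\r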
+\r
+\r
+\r
+Optimizations\r
+\r
+ 1. LUCENE-937: CachingTokenFilter now uses an iterator to access the \r
+    Tokens that are cached in the LinkedList. This increases performance \r
+    significantly, especially when the number of Tokens is large. \r
+    (Mark Miller via Michael Busch)\r
+\r
+ 2. LUCENE-843: Substantial optimizations to improve how IndexWriter\r
+    uses RAM for buffering documents and to speed up indexing (2X-8X\r
+    faster).  A single shared hash table now records the in-memory\r
+    postings per unique term and is directly flushed into a single\r
+    segment.  (Mike McCandless)\r
\r
+ 3. LUCENE-892: Fixed extra "buffer to buffer copy" that sometimes\r
+    takes place when using compound files.  (Mike McCandless)\r
+\r
+ 4. LUCENE-959: Remove synchronization in Document (yonik)\r
+\r
+ 5. LUCENE-963: Add setters to Field to allow for re-using a single\r
+    Field instance during indexing.  This is a sizable performance\r
+    gain, especially for small documents.  (Mike McCandless)\r
+\r
+ 6. LUCENE-939: Check explicitly for boundary conditions in FieldInfos\r
+    and don't rely on exceptions. (Michael Busch)\r
+\r
+ 7. LUCENE-966: Very substantial speedups (~6X faster) for\r
+    StandardTokenizer (StandardAnalyzer) by using JFlex instead of\r
+    JavaCC to generate the tokenizer.\r
+    (Stanislaw Osinski via Mike McCandless)\r
+\r
+ 8. LUCENE-969: Changed core tokenizers & filters to re-use Token and\r
+    TokenStream instances when possible to improve tokenization\r
+    performance (~10-15%). (Mike McCandless)\r
+\r
+ 9. LUCENE-871: Speedup ISOLatin1AccentFilter (Ian Boston via Mike\r
+    McCandless)\r
+\r
+10. LUCENE-986: Refactored SegmentInfos from IndexReader into the new\r
+    subclass DirectoryIndexReader. SegmentReader and MultiSegmentReader\r
+    now extend DirectoryIndexReader and are the only IndexReader \r
+    implementations that use SegmentInfos to access an index and \r
+    acquire a write lock for index modifications. (Michael Busch)\r
+\r
+11. LUCENE-1007: Allow flushing in IndexWriter to be triggered by\r
+    either RAM usage or document count or both (whichever comes\r
+    first), by adding the symbolic constant DISABLE_AUTO_FLUSH to\r
+    disable one of the flush triggers; a sketch follows this list.\r
+    (Ning Li via Mike McCandless)\r
+\r
+12. LUCENE-1043: Speed up merging of stored fields by bulk-copying the\r
+    raw bytes for each contiguous range of non-deleted documents.\r
+    (Robert Engels via Mike McCandless)\r
+\r
+13. LUCENE-693: Speed up nested conjunctions (~2x) that match many\r
+    documents, and a slight performance increase for top level\r
+    conjunctions.  (yonik)\r
+\r
+14. LUCENE-1098: Make inner class StandardAnalyzer.SavedStreams static \r
+    and final. (Nathan Beyer via Michael Busch)\r
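+\r
+A minimal sketch of item 11 above, keeping only the RAM trigger\r
+(writer is assumed to be an open IndexWriter):\r
+\r
+    // disable the document-count trigger, keep the RAM trigger\r
+    writer.setMaxBufferedDocs(IndexWriter.DISABLE_AUTO_FLUSH);\r
+    writer.setRAMBufferSizeMB(16.0);\r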
+\r
+Documentation\r
+\r
+ 1. LUCENE-1051: Generate separate javadocs for core, demo and contrib\r
+    classes, as well as a unified view. Also add an appropriate menu \r
+    structure to the website. (Michael Busch)\r
+\r
+ 2. LUCENE-746: Fix error message in AnalyzingQueryParser.getPrefixQuery.\r
+    (Ronnie Kolehmainen via Michael Busch)\r
+\r
+Build\r
+\r
+ 1. LUCENE-908: Improvements and simplifications for how the MANIFEST\r
+    file and the META-INF dir are created. (Michael Busch)\r
+\r
+ 2. LUCENE-935: Various improvements for the maven artifacts. Now the\r
+    artifacts also include the sources as .jar files. (Michael Busch)\r
+\r
+ 3. Added apply-patch target to top-level build.  Defaults to looking for\r
+    a patch in ${basedir}/../patches with name specified by -Dpatch.name.\r
+    Can also specify any location by -Dpatch.file property on the command\r
+    line.  This should be helpful for easy application of patches, but it\r
+    is also a step towards integrating automatic patch application with\r
+    JIRA and Hudson, and is thus subject to change.  (Grant Ingersoll)\r
\r
+ 4. LUCENE-935: Defined property "m2.repository.url" to allow setting\r
+    the url to a maven remote repository to deploy to. (Michael Busch)\r
+\r
+ 5. LUCENE-1051: Include javadocs in the maven artifacts. (Michael Busch)\r
+\r
+ 6. LUCENE-1055: Remove gdata-server from build files and its sources \r
+    from trunk. (Michael Busch)\r
+\r
+ 7. LUCENE-935: Allow deploying maven artifacts to a remote m2 repository\r
+    via scp and ssh authentication. (Michael Busch)\r
+       \r
+ 8. LUCENE-1123: Allow overriding the specification version for \r
+    MANIFEST.MF (Michael Busch)\r
+\r
+Test Cases\r
+\r
+ 1. LUCENE-766: Test adding two fields with the same name but different \r
+    term vector setting.  (Nicolas Lalevée via Doron Cohen)\r
+    \r
+======================= Release 2.2.0 2007-06-19 =======================\r
+\r
+Changes in runtime behavior\r
+\r
+API Changes\r
+\r
+ 1. LUCENE-793: created new exceptions and added them to throws clause\r
+    for many methods (all subclasses of IOException for backwards\r
+    compatibility): index.StaleReaderException,\r
+    index.CorruptIndexException, store.LockObtainFailedException.\r
+    This was done to better call out the possible root causes of an\r
+    IOException from these methods.  (Mike McCandless)\r
+\r
+ 2. LUCENE-811: make SegmentInfos class, plus a few methods from related\r
+    classes, package-private again (they were unnecessarily made public\r
+    as part of LUCENE-701).  (Mike McCandless)\r
+\r
+ 3. LUCENE-710: added optional autoCommit boolean to IndexWriter\r
+    constructors.  When this is false, index changes are not committed\r
+    until the writer is closed.  This gives explicit control over when\r
+    a reader will see the changes.  Also added optional custom\r
+    deletion policy to explicitly control when prior commits are\r
+    removed from the index.  This is intended to allow applications to\r
+    share an index over NFS by customizing when prior commits are\r
+    deleted. (Mike McCandless)\r
+\r
+ 4. LUCENE-818: changed most public methods of IndexWriter,\r
+    IndexReader (and its subclasses), FieldsReader and RAMDirectory to\r
+    throw AlreadyClosedException if they are accessed after being\r
+    closed.  (Mike McCandless)\r
+\r
+ 5. LUCENE-834: Changed some access levels for certain Span classes to allow them\r
+    to be overridden.  They have been marked expert only and not for public\r
+    consumption. (Grant Ingersoll) \r
+\r
+ 6. LUCENE-796: Removed calls to super.* from various get*Query methods in\r
+    MultiFieldQueryParser, in order to allow sub-classes to override them.\r
+    (Steven Parkes via Otis Gospodnetic)\r
+\r
+ 7. LUCENE-857: Removed caching from QueryFilter and deprecated QueryFilter\r
+    in favour of QueryWrapperFilter, or the QueryWrapperFilter +\r
+    CachingWrapperFilter combination when caching is desired; a sketch\r
+    follows this list.\r
+    (Chris Hostetter, Otis Gospodnetic)\r
+\r
+ 8. LUCENE-869: Changed FSIndexInput and FSIndexOutput to inner classes of FSDirectory\r
+    to enable extensibility of these classes. (Michael Busch)\r
+\r
+ 9. LUCENE-580: Added the public method reset() to TokenStream. This method does\r
+    nothing by default, but may be overridden by subclasses to support consuming\r
+    the TokenStream more than once. (Michael Busch)\r
+\r
+10. LUCENE-580: Added a new constructor to Field that takes a TokenStream as\r
+    argument, available as tokenStreamValue(). This is useful to avoid the need for\r
+    "dummy analyzers" for pre-analyzed fields. (Karl Wettin, Michael Busch)\r
+\r
+11. LUCENE-730: Added the new methods setAllowDocsOutOfOrder() and\r
+    getAllowDocsOutOfOrder() to BooleanQuery. Deprecated the methods\r
+    setUseScorer14() and getUseScorer14(). The optimization patch\r
+    LUCENE-730 (see Optimizations->3.) improves performance for certain\r
+    queries but results in scoring out of docid order. This patch\r
+    reverses that change, so by default hit docs are now scored in docid\r
+    order unless setAllowDocsOutOfOrder(true) is explicitly called.\r
+    This patch also re-enables the tests in QueryUtils that check for\r
+    docid order. (Paul Elschot, Doron Cohen, Michael Busch)\r
+\r
+12. LUCENE-888: Added Directory.openInput(File path, int bufferSize)\r
+    to optionally specify the size of the read buffer.  Also added\r
+    BufferedIndexInput.setBufferSize(int) to change the buffer size.\r
+    (Mike McCandless)\r
+\r
+13. LUCENE-923: Make SegmentTermPositionVector package-private. It does not need\r
+    to be public because it implements the public interface TermPositionVector.\r
+    (Michael Busch)\r
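+\r
+A minimal sketch of the caching combination from item 7 above (query\r
+is assumed to be the Query being wrapped):\r
+\r
+    // wrap the query as a filter, then cache the filter's results\r
+    Filter filter = new CachingWrapperFilter(new QueryWrapperFilter(query));\r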
+\r
+Bug fixes\r
+\r
+ 1. LUCENE-804: Fixed build.xml to pack a fully compilable src dist.  (Doron Cohen)\r
+\r
+ 2. LUCENE-813: Leading wildcard fixed to work with trailing wildcard.\r
+    Query parser modified to create a prefix query only for the case \r
+    that there is a single trailing wildcard (and no additional wildcard \r
+    or '?' in the query text).  (Doron Cohen)\r
+\r
+ 3. LUCENE-812: Add no-argument constructors to NativeFSLockFactory\r
+    and SimpleFSLockFactory.  This enables all 4 builtin LockFactory\r
+    implementations to be specified via the System property\r
+    org.apache.lucene.store.FSDirectoryLockFactoryClass.  (Mike McCandless)\r
+\r
+ 4. LUCENE-821: The new single-norm-file introduced by LUCENE-756\r
+    failed to reduce the number of open descriptors since it was still\r
+    opened once per field with norms. (yonik)\r
+\r
+ 5. LUCENE-823: Make sure internal file handles are closed when\r
+    hitting an exception (eg disk full) while flushing deletes in\r
+    IndexWriter's mergeSegments, and also during\r
+    IndexWriter.addIndexes.  (Mike McCandless)\r
+\r
+ 6. LUCENE-825: If directory is removed after\r
+    FSDirectory.getDirectory() but before IndexReader.open you now get\r
+    a FileNotFoundException like Lucene pre-2.1 (before this fix you\r
+    got an NPE).  (Mike McCandless)\r
+\r
+ 7. LUCENE-800: Removed backslash from the TERM_CHAR list in the queryparser, \r
+    because the backslash is the escape character. Also changed the ESCAPED_CHAR\r
+    list to contain all possible characters, because every character that \r
+    follows a backslash should be considered as escaped. (Michael Busch)\r
+\r
+ 8. LUCENE-372: QueryParser.parse() now ensures that the entire input string \r
+    is consumed. Now a ParseException is thrown if a query contains too many\r
+    closing parentheses. (Andreas Neumann via Michael Busch)\r
+\r
+ 9. LUCENE-814: javacc build targets now fix line-end-style of generated files.\r
+    Now also deleting all javacc generated files before calling javacc.\r
+    (Steven Parkes, Doron Cohen)\r
+    \r
+10. LUCENE-829: close readers in contrib/benchmark. (Karl Wettin, Doron Cohen)\r
+\r
+11. LUCENE-828: Minor fix for Term's equals().\r
+    (Paul Cowan via Otis Gospodnetic)\r
+\r
+12. LUCENE-846: Fixed: if IndexWriter is opened with autoCommit=false,\r
+    and you call addIndexes, and hit an exception (eg disk full) then\r
+    when IndexWriter rolls back its internal state this could corrupt\r
+    the instance of IndexWriter (but, not the index itself) by\r
+    referencing already deleted segments.  This bug was only present\r
+    in 2.2 (trunk), ie was never released.  (Mike McCandless)\r
+    \r
+13. LUCENE-736: Sloppy phrase query with repeating terms matches wrong docs.\r
+    For example query "B C B"~2 matches the doc "A B C D E". (Doron Cohen)\r
+    \r
+14. LUCENE-789: Fixed: custom similarity is ignored when using\r
+    MultiSearcher (problem reported by Alexey Lef). Now the similarity\r
+    applied by MultiSearcher.setSimilarity(sim) is used. Note that, as\r
+    before this fix, creating a MultiSearcher from Searchers for which a\r
+    custom similarity was set has no effect - it is masked by the\r
+    similarity of the MultiSearcher. This is as designed, because\r
+    MultiSearcher operates on Searchables (not Searchers). (Doron Cohen)\r
+\r
+15. LUCENE-880: Fixed DocumentWriter to close the TokenStreams after it\r
+    has written the postings. Then the resources associated with the \r
+    TokenStreams can safely be released. (Michael Busch)\r
+\r
+16. LUCENE-883: consecutive calls to Spellchecker.indexDictionary()\r
+    won't insert terms twice anymore. (Daniel Naber)\r
+\r
+17. LUCENE-881: QueryParser.escape() now also escapes the characters\r
+    '|' and '&' which are part of the queryparser syntax. (Michael Busch)\r
+\r
+18. LUCENE-886: Spellchecker clean up: exceptions aren't printed to STDERR\r
+    and ignored anymore, but are re-thrown. Some javadoc improvements.\r
+    (Daniel Naber)\r
+\r
+19. LUCENE-698: FilteredQuery now takes the query boost into account for \r
+    scoring. (Michael Busch)\r
+\r
+20. LUCENE-763: Spellchecker: LuceneDictionary used to skip the first\r
+    word in the enumeration. (Christian Mallwitz via Daniel Naber)\r
+    \r
+21. LUCENE-903: FilteredQuery explanation inaccuracy with boost.\r
+    Explanation tests now "deep" check the explanation details.\r
+    (Chris Hostetter, Doron Cohen)\r
+    \r
+22. LUCENE-912: DisjunctionMaxScorer first skipTo(target) call ignores the \r
+    skip target param and ends up at the first match.\r
+    (Sudaakeran B. via Chris Hostetter & Doron Cohen)\r
+    \r
+23. LUCENE-913: Two consecutive score() calls return different \r
+    scores for Boolean Queries. (Michael Busch, Doron Cohen)\r
+\r
+24. LUCENE-1013: Fix IndexWriter.setMaxMergeDocs to work "out of the\r
+    box", again, by moving set/getMaxMergeDocs up from\r
+    LogDocMergePolicy into LogMergePolicy.  This fixes the API\r
+    breakage (non backwards compatible change) caused by LUCENE-994.\r
+    (Yonik Seeley via Mike McCandless)\r
+\r
+New features\r
+\r
+ 1. LUCENE-759: Added two n-gram-producing TokenFilters.\r
+    (Otis Gospodnetic)\r
+\r
+ 2. LUCENE-822: Added FieldSelector capabilities to Searchable for use with\r
+    RemoteSearcher, and other Searchable implementations. (Mark Miller, Grant Ingersoll)\r
+\r
+ 3. LUCENE-755: Added the ability to store arbitrary binary metadata in\r
+    the posting list. These metadata are called Payloads. For every\r
+    position of a Token, one Payload in the form of a variable-length\r
+    byte array can be stored in the prox file. A sketch follows this\r
+    list.\r
+    Remark: The APIs introduced with this feature are in an experimental\r
+            state and thus contain appropriate warnings in the javadocs.\r
+    (Michael Busch)\r
+\r
+ 4. LUCENE-834: Added BoostingTermQuery which can boost scores based on the\r
+    values of a payload (see #3 above.) (Grant Ingersoll)\r
+\r
+ 5. LUCENE-834: Similarity has a new method for scoring payloads called\r
+    scorePayloads that can be overridden to take advantage of payload\r
+    storage (see #3 above)\r
+\r
+ 6. LUCENE-834: Added isPayloadAvailable() onto TermPositions interface and\r
+    implemented it in the appropriate places (Grant Ingersoll)\r
+\r
+ 7. LUCENE-853: Added RemoteCachingWrapperFilter to enable caching of Filters\r
+    on the remote side of the RMI connection.\r
+    (Matt Ericson via Otis Gospodnetic)\r
+\r
+ 8. LUCENE-446: Added Solr's search.function for scores based on field \r
+    values, plus CustomScoreQuery for simple score (post) customization.\r
+    (Yonik Seeley, Doron Cohen)\r
+\r
+ 9. LUCENE-1058: Added new TeeTokenFilter (like the UNIX 'tee' command) and SinkTokenizer which can be used to share tokens between two or more\r
+    Fields such that the other Fields do not have to go through the whole Analysis process over again.  For instance, if you have two\r
+    Fields that share all the same analysis steps except one lowercases tokens and the other does not, you can coordinate the operations\r
+    between the two using the TeeTokenFilter and the SinkTokenizer.  See TeeSinkTokenTest.java for examples.\r
+    (Grant Ingersoll, Michael Busch, Yonik Seeley)\r
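+\r
+A minimal sketch of attaching a Payload (item 3 above) from inside a\r
+custom TokenFilter subclass; the surrounding class and analyzer wiring\r
+are assumed:\r
+\r
+    public Token next() throws IOException {\r
+        Token token = input.next();\r
+        if (token != null) {\r
+            // store arbitrary bytes at this token's position\r
+            token.setPayload(new Payload(new byte[] { 1 }));\r
+        }\r
+        return token;\r
+    }\r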
\r
+Optimizations\r
+\r
+ 1. LUCENE-761: The proxStream is now cloned lazily in SegmentTermPositions\r
+    when nextPosition() is called for the first time. This allows using instances\r
+    of SegmentTermPositions instead of SegmentTermDocs without additional costs.\r
+    (Michael Busch)\r
+\r
+ 2. LUCENE-431: RAMInputStream and RAMOutputStream extend IndexInput and\r
+    IndexOutput directly now. This avoids further buffering and thus avoids \r
+    unnecessary array copies. (Michael Busch)\r
+\r
+ 3. LUCENE-730: Updated BooleanScorer2 to make use of BooleanScorer in some\r
+    cases and possibly improve scoring performance.  Documents can now be\r
+    delivered out-of-order as they are scored (e.g. to HitCollector).\r
+    N.B. A bit of code had to be disabled in QueryUtils in order for\r
+    the TestBoolean2 test to keep passing.\r
+    (Paul Elschot via Otis Gospodnetic)\r
+\r
+ 4. LUCENE-882: Spellchecker doesn't store the ngrams anymore but only indexes\r
+    them to keep the spell index small. (Daniel Naber)\r
+\r
+ 5. LUCENE-430: Delay allocation of the buffer after a clone of BufferedIndexInput.\r
+    Together with LUCENE-888 this will allow adjusting the buffer size\r
+    dynamically. (Paul Elschot, Michael Busch)\r
\r
+ 6. LUCENE-888: Increase buffer sizes inside CompoundFileWriter and\r
+    BufferedIndexOutput.  Also increase buffer size in\r
+    BufferedIndexInput, but only when used during merging.  Together,\r
+    these increases yield 10-18% overall performance gain vs the\r
+    previous 1K defaults.  (Mike McCandless)\r
+\r
+ 7. LUCENE-866: Adds multi-level skip lists to the posting lists. This speeds \r
+    up most queries that use skipTo(), especially on big indexes with large posting \r
+    lists. For average AND queries the speedup is about 20%, for queries that \r
+    contain very frequent and very unique terms the speedup can be over 80%.\r
+    (Michael Busch)\r
+\r
+Documentation\r
+\r
+ 1. LUCENE-791 and INFRA-1173: Infrastructure moved the Wiki to\r
+    http://wiki.apache.org/lucene-java/   Updated the links in the docs and\r
+    wherever else I found references.  (Grant Ingersoll, Joe Schaefer)\r
+\r
+ 2. LUCENE-807: Fixed the javadoc for ScoreDocComparator.compare() to be \r
+    consistent with java.util.Comparator.compare(): Any integer is allowed to \r
+    be returned instead of only -1/0/1.\r
+    (Paul Cowan via Michael Busch)\r
\r
+ 3. LUCENE-875: Solved javadoc warnings & errors under jdk1.4. \r
+    Solved javadoc errors under jdk5 (jars in path for gdata).\r
+    Made "javadocs" target depend on "build-contrib" for first downloading\r
+    contrib jars configured for dynamic downloaded. (Note: when running\r
+    behind firewall, a firewall prompt might pop up) (Doron Cohen)\r
+\r
+ 4. LUCENE-740: Added SNOWBALL-LICENSE.txt to the snowball package and a\r
+    remark about the license to NOTICE.TXT. (Steven Parkes via Michael Busch)\r
+\r
+ 5. LUCENE-925: Added analysis package javadocs. (Grant Ingersoll and Doron Cohen)\r
+\r
+ 6. LUCENE-926: Added document package javadocs. (Grant Ingersoll)\r
+\r
+Build\r
+\r
+ 1. LUCENE-802: Added LICENSE.TXT and NOTICE.TXT to Lucene jars.\r
+    (Steven Parkes via Michael Busch)\r
+\r
+ 2. LUCENE-885: "ant test" now includes all contrib tests.  The new\r
+    "ant test-core" target can be used to run only the Core (non\r
+    contrib) tests. \r
+    (Chris Hostetter)\r
+    \r
+ 3. LUCENE-900: "ant test" now enables Java assertions (in Lucene packages).\r
+    (Doron Cohen)\r
+\r
+ 4. LUCENE-894: Add custom build file for binary distributions that includes\r
+    targets to build the demos. (Chris Hostetter, Michael Busch)\r
+\r
+ 5. LUCENE-904: The "package" targets in build.xml now also generate .md5\r
+    checksum files. (Chris Hostetter, Michael Busch)\r
+\r
+ 6. LUCENE-907: Include LICENSE.TXT and NOTICE.TXT in the META-INF dirs of\r
+    demo war, demo jar, and the contrib jars. (Michael Busch)\r
+    \r
+ 7. LUCENE-909: Demo targets for running the demo. (Doron Cohen)\r
+\r
+ 8. LUCENE-908: Improves content of MANIFEST file and makes it customizable\r
+    for the contribs. Adds SNOWBALL-LICENSE.txt to META-INF of the snowball\r
+    jar and makes sure that the lucli jar contains LICENSE.txt and NOTICE.txt.\r
+    (Chris Hostetter, Michael Busch)\r
+\r
+ 9. LUCENE-930: Various contrib building improvements to ensure contrib\r
+    dependencies are met, and test compilation errors fail the build.\r
+    (Steven Parkes, Chris Hostetter)\r
+\r
+10. LUCENE-622: Add ant target and pom.xml files for building maven artifacts \r
+    of the Lucene core and the contrib modules. \r
+    (Sami Siren, Karl Wettin, Michael Busch)\r
+\r
+======================= Release 2.1.0 2007-02-14 =======================\r
+\r
+Changes in runtime behavior\r
+\r
+ 1. 's' and 't' have been removed from the list of default stopwords\r
+    in StopAnalyzer (also used by StandardAnalyzer). Having e.g. 's'\r
+    as a stopword meant that 's-class' led to the same results as 'class'.\r
+    Note that this problem still exists for 'a', e.g. in 'a-class' as\r
+    'a' continues to be a stopword.\r
+    (Daniel Naber)\r
+\r
+ 2. LUCENE-478: Updated the list of Unicode code point ranges for CJK\r
+    (now split into CJ and K) in StandardAnalyzer.  (John Wang and\r
+    Steven Rowe via Otis Gospodnetic)\r
+\r
+ 3. Modified some CJK Unicode code point ranges in StandardTokenizer.jj,\r
+    and added a few more of them to increase CJK character coverage.\r
+    Also documented some of the ranges.\r
+    (Otis Gospodnetic)\r
+\r
+ 4. LUCENE-489: Add support for leading wildcard characters (*, ?) to\r
+    QueryParser.  Default is to disallow them, as before.\r
+    (Steven Parkes via Otis Gospodnetic)\r
+\r
+ 5. LUCENE-703: QueryParser changed to default to use of ConstantScoreRangeQuery\r
+    for range queries. Added useOldRangeQuery property to QueryParser to allow\r
+    selection of old RangeQuery class if required.\r
+    (Mark Harwood)\r
+\r
+ 6. LUCENE-543: WildcardQuery now performs a TermQuery if the provided term\r
+    does not contain a wildcard character (? or *), when previously a\r
+    StringIndexOutOfBoundsException was thrown.\r
+    (Michael Busch via Erik Hatcher)\r
+\r
+ 7. LUCENE-726: Removed the use of deprecated doc.fields() method and\r
+    Enumeration.\r
+    (Michael Busch via Otis Gospodnetic)\r
+\r
+ 8. LUCENE-436: Removed finalize() in TermInfosReader and SegmentReader,\r
+    and added a call to enumerators.remove() in TermInfosReader.close().\r
+    The finalize() overrides were added to help with a pre-1.4.2 JVM bug\r
+    that has since been fixed, plus we no longer support pre-1.4.2 JVMs.\r
+    (Otis Gospodnetic)\r
+\r
+ 9. LUCENE-771: The default location of the write lock is now the\r
+    index directory, and it is named simply "write.lock" (without a big\r
+    digest prefix).  The system properties "org.apache.lucene.lockDir"\r
+    and "java.io.tmpdir" are no longer used as the global directory\r
+    for storing lock files, and the LOCK_DIR field of FSDirectory is\r
+    now deprecated.  (Mike McCandless)\r
+\r
+New features\r
+\r
+ 1. LUCENE-503: New ThaiAnalyzer and ThaiWordFilter in contrib/analyzers\r
+    (Samphan Raruenrom via Chris Hostetter)\r
+\r
+ 2. LUCENE-545: New FieldSelector API and associated changes to\r
+    IndexReader and implementations.  New Fieldable interface for use\r
+    with the lazy field loading mechanism.  (Grant Ingersoll and Chuck\r
+    Williams via Grant Ingersoll)\r
+\r
+ 3. LUCENE-676: Move Solr's PrefixFilter to Lucene core. (Yura\r
+    Smolsky, Yonik Seeley)\r
+\r
+ 4. LUCENE-678: Added NativeFSLockFactory, which implements locking\r
+    using OS native locking (via java.nio.*).  (Michael McCandless via\r
+    Yonik Seeley)\r
+\r
+ 5. LUCENE-544: Added the ability to specify different boosts for\r
+    different fields when using MultiFieldQueryParser (Matt Ericson\r
+    via Otis Gospodnetic)\r
+\r
+ 6. LUCENE-528: New IndexWriter.addIndexesNoOptimize() that doesn't\r
+    optimize the index when adding new segments, only performing\r
+    merges as needed.  (Ning Li via Yonik Seeley)\r
+\r
+ 7. LUCENE-573: QueryParser now allows backslash escaping in\r
+    quoted terms and phrases. (Michael Busch via Yonik Seeley)\r
+\r
+ 8. LUCENE-716: QueryParser now allows specification of Unicode\r
+    characters in terms via a unicode escape of the form \uXXXX\r
+    (Michael Busch via Yonik Seeley)\r
+\r
+ 9. LUCENE-709: Added RAMDirectory.sizeInBytes(), IndexWriter.ramSizeInBytes()\r
+    and IndexWriter.flushRamSegments(), allowing applications to\r
+    control the amount of memory used to buffer documents.\r
+    (Chuck Williams via Yonik Seeley)\r
+\r
+10. LUCENE-723: QueryParser now parses *:* as MatchAllDocsQuery\r
+    (Yonik Seeley)\r
+\r
+11. LUCENE-741: Command-line utility for modifying or removing norms\r
+    on fields in an existing index.  This is mostly based on LUCENE-496\r
+    and lives in contrib/miscellaneous.\r
+    (Chris Hostetter, Otis Gospodnetic)\r
+\r
+12. LUCENE-759: Added NGramTokenizer and EdgeNGramTokenizer classes and\r
+    their passing unit tests.\r
+    (Otis Gospodnetic)\r
+\r
+13. LUCENE-565: Added methods to IndexWriter to more efficiently\r
+    handle updating documents (the "delete then add" use case); a\r
+    sketch follows this list.  This is intended to be an eventual\r
+    replacement for the existing IndexModifier.  Added\r
+    IndexWriter.flush() (renamed from flushRamSegments()) to flush all\r
+    pending updates (held in RAM) to the Directory.  (Ning Li via Mike\r
+    McCandless)\r
+\r
+14. LUCENE-762: Added in SIZE and SIZE_AND_BREAK FieldSelectorResult options\r
+    which allow one to retrieve the size of a field without retrieving the\r
+    actual field. (Chuck Williams via Grant Ingersoll)\r
+\r
+15. LUCENE-799: Properly handle lazy, compressed fields.\r
+    (Mike Klaas via Grant Ingersoll)\r
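+\r
+A minimal sketch of the "delete then add" update from item 13 above,\r
+assuming writer is an open IndexWriter and each document carries a\r
+unique "id" field:\r
+\r
+    // delete any documents matching the "id" term, then add doc\r
+    writer.updateDocument(new Term("id", "42"), doc);\r
+    // move pending updates from RAM to the Directory\r
+    writer.flush();\r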
+\r
+API Changes\r
+\r
+ 1. LUCENE-438: Remove "final" from Token, implement Cloneable, allow\r
+    changing of termText via setTermText().  (Yonik Seeley)\r
+\r
+ 2. org.apache.lucene.analysis.nl.WordlistLoader has been deprecated\r
+    and is supposed to be replaced with the WordlistLoader class in\r
+    package org.apache.lucene.analysis (Daniel Naber)\r
+\r
+ 3. LUCENE-609: Revert return type of Document.getField(s) to Field\r
+    for backward compatibility, added new Document.getFieldable(s)\r
+    for access to new lazy loaded fields. (Yonik Seeley)\r
+\r
+ 4. LUCENE-608: Document.fields() has been deprecated and a new method\r
+    Document.getFields() has been added that returns a List instead of\r
+    an Enumeration (Daniel Naber)\r
+\r
+ 5. LUCENE-605: New Explanation.isMatch() method and new ComplexExplanation\r
+    subclass allows explain methods to produce Explanations which model\r
+    "matching" independent of having a positive value.\r
+    (Chris Hostetter)\r
+\r
+ 6. LUCENE-621: New static methods IndexWriter.setDefaultWriteLockTimeout\r
+    and IndexWriter.setDefaultCommitLockTimeout for overriding default\r
+    timeout values for all future instances of IndexWriter (as well\r
+    as for any other classes that may reference the static values,\r
+    ie: IndexReader).\r
+    (Michael McCandless via Chris Hostetter)\r
+\r
+ 7. LUCENE-638: FSDirectory.list() now only returns the directory's\r
+    Lucene-related files. Thanks to this change one can now construct\r
+    a RAMDirectory from a file system directory that contains files\r
+    not related to Lucene.\r
+    (Simon Willnauer via Daniel Naber)\r
+\r
+ 8. LUCENE-635: Decoupled the locking implementation from the Directory\r
+    implementation.  Added set/getLockFactory to Directory and moved\r
+    all locking code into subclasses of the abstract class LockFactory.\r
+    FSDirectory and RAMDirectory still default to their prior locking\r
+    implementations, but now you can mix & match, for example using\r
+    SingleInstanceLockFactory (ie, in-memory locking) with an\r
+    FSDirectory; a sketch follows this list.  Note that you must now\r
+    call setDisableLocks before instantiating an FSDirectory if you\r
+    wish to disable locking for that Directory.\r
+    (Michael McCandless, Jeff Patterson via Yonik Seeley)\r
+\r
+ 9. LUCENE-657: Made FuzzyQuery non-final and inner ScoreTerm protected.\r
+    (Steven Parkes via Otis Gospodnetic)\r
+\r
+10. LUCENE-701: Lockless commits: a commit lock is no longer required\r
+    when a writer commits and a reader opens the index.  This includes\r
+    a change to the index file format (see docs/fileformats.html for\r
+    details).  It also removes all APIs associated with the commit\r
+    lock & its timeout.  Readers are now truly read-only and do not\r
+    block one another on startup.  This is the first step to getting\r
+    Lucene to work correctly over NFS (second step is\r
+    LUCENE-710). (Mike McCandless)\r
+\r
+11. LUCENE-722: DEFAULT_MIN_DOC_FREQ was misspelled DEFALT_MIN_DOC_FREQ\r
+    in Similarity's MoreLikeThis class. The misspelling has been\r
+    replaced by the correct spelling.\r
+    (Andi Vajda via Daniel Naber)\r
+\r
+12. LUCENE-738: Reduce the size of the file that keeps track of which\r
+    documents are deleted when the number of deleted documents is\r
+    small.  This changes the index file format and cannot be\r
+    read by previous versions of Lucene.  (Doron Cohen via Yonik Seeley)\r
+\r
+13. LUCENE-756: Maintain all norms in a single .nrm file to reduce the\r
+    number of open files and file descriptors for the non-compound index\r
+    format.  This changes the index file format, but maintains the\r
+    ability to read and update older indices. The first segment merge\r
+    on an older format index will create a single .nrm file for the new\r
+    segment.  (Doron Cohen via Yonik Seeley)\r
+\r
+14. LUCENE-732: DateTools support has been added to QueryParser, with\r
+    setters for both the default Resolution, and per-field Resolution.\r
+    For backwards compatibility, DateField is still used if no Resolutions\r
+    are specified. (Michael Busch via Chris Hostetter)\r
+\r
+15. Added isOptimized() method to IndexReader.\r
+    (Otis Gospodnetic)\r
+\r
+16. LUCENE-773: Deprecate the FSDirectory.getDirectory(*) methods that\r
+    take a boolean "create" argument.  Instead you should use\r
+    IndexWriter's "create" argument to create a new index.\r
+    (Mike McCandless)\r
+\r
+17. LUCENE-780: Add a static Directory.copy() method to copy files\r
+    from one Directory to another.  (Jiri Kuhn via Mike McCandless)\r
+\r
+18. LUCENE-773: Added Directory.clearLock(String name) to forcefully\r
+    remove an old lock.  The default implementation is to ask the\r
+    lockFactory (if non null) to clear the lock.  (Mike McCandless)\r
+\r
+19. LUCENE-795: Directory.renameFile() has been deprecated as it is\r
+    not used anymore inside Lucene.  (Daniel Naber)\r
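+\r
+A minimal sketch of the mix & match from item 8 above, using the\r
+set/getLockFactory API it describes (the path is illustrative):\r
+\r
+    Directory dir = FSDirectory.getDirectory("/path/to/index");\r
+    // use in-memory (single-JVM) locking with a filesystem directory\r
+    dir.setLockFactory(new SingleInstanceLockFactory());\r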
+\r
+Bug fixes\r
+\r
+ 1. Fixed the web application demo (built with "ant war-demo") which\r
+    didn't work because it used a QueryParser method that had\r
+    been removed (Daniel Naber)\r
+\r
+ 2. LUCENE-583: ISOLatin1AccentFilter fails to preserve positionIncrement\r
+    (Yonik Seeley)\r
+\r
+ 3. LUCENE-575: SpellChecker min score is incorrectly changed by suggestSimilar\r
+    (Karl Wettin via Yonik Seeley)\r
+\r
+ 4. LUCENE-587: Explanation.toHtml was producing malformed HTML\r
+    (Chris Hostetter)\r
+\r
+ 5. Fix to allow MatchAllDocsQuery to be used with RemoteSearcher (Yonik Seeley)\r
+\r
+ 6. LUCENE-601: RAMDirectory and RAMFile made Serializable\r
+    (Karl Wettin via Otis Gospodnetic)\r
+\r
+ 7. LUCENE-557: Fixes to BooleanQuery and FilteredQuery so that the score\r
+    Explanations match up with the real scores.\r
+    (Chris Hostetter)\r
+\r
+ 8. LUCENE-607: ParallelReader's TermEnum fails to advance properly to\r
+    new fields (Chuck Williams, Christian Kohlschuetter via Yonik Seeley)\r
+\r
+ 9. LUCENE-610,LUCENE-611: Simple syntax changes to allow compilation with ecj:\r
+    disambiguate inner class scorer's use of doc() in BooleanScorer2,\r
+    other test code changes.  (DM Smith via Yonik Seeley)\r
+\r
+10. LUCENE-451: All core query types now use ComplexExplanations so that\r
+    boosts of zero don't confuse the BooleanWeight explain method.\r
+    (Chris Hostetter)\r
+\r
+11. LUCENE-593: Fixed LuceneDictionary's inner Iterator\r
+    (Kåre Fiedler Christiansen via Otis Gospodnetic)\r
+\r
+12. LUCENE-641: fixed an off-by-one bug with IndexWriter.setMaxFieldLength()\r
+    (Daniel Naber)\r
+\r
+13. LUCENE-659: Make PerFieldAnalyzerWrapper delegate getPositionIncrementGap()\r
+    to the correct analyzer for the field. (Chuck Williams via Yonik Seeley)\r
+\r
+14. LUCENE-650: Fixed NPE in Locale specific String Sort when Document\r
+    has no value.\r
+    (Oliver Hutchison via Chris Hostetter)\r
+\r
+15. LUCENE-683: Fixed data corruption when reading lazy loaded fields.\r
+    (Yonik Seeley)\r
+\r
+16. LUCENE-678: Fixed bug in NativeFSLockFactory which caused the same\r
+    lock to be shared between different directories.\r
+    (Michael McCandless via Yonik Seeley)\r
+\r
+17. LUCENE-690: Fixed thread unsafe use of IndexInput by lazy loaded fields.\r
+    (Yonik Seeley)\r
+\r
+18. LUCENE-696: Fix bug when scorer for DisjunctionMaxQuery has skipTo()\r
+    called on it before next().  (Yonik Seeley)\r
+\r
+19. LUCENE-569: Fixed SpanNearQuery bug, for 'inOrder' queries it would fail\r
+    to recognize ordered spans if they overlapped with unordered spans.\r
+    (Paul Elschot via Chris Hostetter)\r
+\r
+20. LUCENE-706: Updated fileformats.xml|html concerning the docdelta value\r
+    in the frequency file. (Johan Stuyts, Doron Cohen via Grant Ingersoll)\r
+\r
+21. LUCENE-715: Fixed private constructor in IndexWriter.java to\r
+    properly release the acquired write lock if there is an\r
+    IOException after acquiring the write lock but before finishing\r
+    instantiation. (Matthew Bogosian via Mike McCandless)\r
+\r
+22. LUCENE-651: Multiple different threads requesting the same\r
+    FieldCache entry (often for Sorting by a field) at the same\r
+    time caused multiple generations of that entry, which was\r
+    detrimental to performance and memory use.\r
+    (Oliver Hutchison via Otis Gospodnetic)\r
+\r
+23. LUCENE-717: Fixed build.xml not to fail when there is no lib dir.\r
+    (Doron Cohen via Otis Gospodnetic)\r
+\r
+24. LUCENE-728: Removed duplicate/old MoreLikeThis and SimilarityQueries\r
+    classes from contrib/similarity, as their new home is under\r
+    contrib/queries.\r
+    (Otis Gospodnetic)\r
+\r
+25. LUCENE-669: Do not double-close the RandomAccessFile in\r
+    FSIndexInput/Output during finalize().  Besides sending an\r
+    IOException up to the GC, this may also be the cause of intermittent\r
+    "The handle is invalid" IOExceptions on Windows when trying to\r
+    close readers or writers. (Michael Busch via Mike McCandless)\r
+\r
+26. LUCENE-702: Fix IndexWriter.addIndexes(*) to not corrupt the index\r
+    on any exceptions (eg disk full).  The semantics of these methods\r
+    are now transactional: either all indices are merged or none are.\r
+    Also fixed IndexWriter.mergeSegments (called outside of\r
+    addIndexes(*) by addDocument, optimize, flushRamSegments) and\r
+    IndexReader.commit() (called by close) to clean up and keep the\r
+    instance state consistent to what's actually in the index (Mike\r
+    McCandless).\r
+\r
+27. LUCENE-129: Change finalizers to do "try {...} finally\r
+    {super.finalize();}" to make sure we don't miss finalizers in\r
+    classes above us. (Esmond Pitt via Mike McCandless)\r
+\r
+28. LUCENE-754: Fix a problem introduced by LUCENE-651, causing\r
+    IndexReaders to hang around forever, in addition to not\r
+    fixing the original FieldCache performance problem.\r
+    (Chris Hostetter, Yonik Seeley)\r
+\r
+29. LUCENE-140: Fix IndexReader.deleteDocument(int docNum) to\r
+    correctly raise ArrayIndexOutOfBoundsException when docNum is too\r
+    large.  Previously, if docNum was only slightly too large (within\r
+    the same multiple of 8, ie, up to 7 ints beyond maxDoc), no\r
+    exception would be raised and instead the index would become\r
+    silently corrupted.  The corruption then only appears much later,\r
+    in mergeSegments, when the corrupted segment is merged with\r
+    segment(s) after it. (Mike McCandless)\r
+\r
+30. LUCENE-768: Fix case where an Exception during deleteDocument,\r
+    undeleteAll or setNorm in IndexReader could leave the reader in a\r
+    state where close() fails to release the write lock.\r
+    (Mike McCandless)\r
+\r
+31. Remove "tvp" from known index file extensions because it is\r
+    never used. (Nicolas Lalevée via Bernhard Messer)\r
+    \r
+32. LUCENE-767: Change how SegmentReader.maxDoc() is computed to not\r
+    rely on file length check and instead use the SegmentInfo's\r
+    docCount that's already stored explicitly in the index.  This is a\r
+    defensive bug fix (ie, there is no known problem seen "in real\r
+    life" due to this, just a possible future problem).  (Chuck\r
+    Williams via Mike McCandless)\r
+\r
+Optimizations\r
+\r
+  1. LUCENE-586: TermDocs.skipTo() is now more efficient for\r
+     multi-segment indexes.  This will improve the performance of many\r
+     types of queries against a non-optimized index. (Andrew Hudson\r
+     via Yonik Seeley)\r
+\r
+  2. LUCENE-623: RAMDirectory.close now nulls out its reference to all\r
+     internal "files", allowing them to be GCed even if references to the\r
+     RAMDirectory itself still exist. (Nadav Har'El via Chris Hostetter)\r
+\r
+  3. LUCENE-629: Compressed fields are no longer uncompressed and\r
+     recompressed during segment merges (e.g. during indexing or\r
+     optimizing), thus improving performance. (Michael Busch via Otis\r
+     Gospodnetic)\r
+\r
+  4. LUCENE-388: Improve indexing performance when maxBufferedDocs is\r
+     large by keeping a count of buffered documents rather than\r
+     counting after each document addition.  (Doron Cohen, Paul Smith,\r
+     Yonik Seeley)\r
+\r
+  5. Modified TermScorer.explain to use TermDocs.skipTo() instead of\r
+     looping through docs. (Grant Ingersoll)\r
+\r
+  6. LUCENE-672: New indexing segment merge policy flushes all\r
+     buffered docs to their own segment and delays a merge until\r
+     mergeFactor segments of a certain level have been accumulated.\r
+     This increases indexing performance in the presence of deleted\r
+     docs or partially full segments as well as enabling future\r
+     optimizations.\r
+\r
+     NOTE: this also fixes an "under-merging" bug whereby it is\r
+     possible to get far too many segments in your index (which will\r
+    drastically slow down search, risk exhausting the file descriptor\r
+     limit, etc.).  This can happen when the number of buffered docs\r
+     at close, plus the number of docs in the last non-ram segment is\r
+     greater than mergeFactor. (Ning Li, Yonik Seeley)\r
+\r
+  7. Lazy loaded fields unnecessarily retained an extra copy of loaded\r
+     String data.  (Yonik Seeley)\r
+\r
+  8. LUCENE-443: ConjunctionScorer performance increase.  Speed up\r
+     any BooleanQuery with more than one mandatory clause.\r
+     (Abdul Chaudhry, Paul Elschot via Yonik Seeley)\r
+\r
+  9. LUCENE-365: DisjunctionSumScorer performance increase of\r
+     ~30%. Speeds up queries with optional clauses. (Paul Elschot via\r
+     Yonik Seeley)\r
+\r
+ 10. LUCENE-695: Optimized BufferedIndexInput.readBytes() for medium\r
+     size buffers, which will speed up merging and retrieving binary\r
+     and compressed fields.  (Nadav Har'El via Yonik Seeley)\r
+\r
+ 11. LUCENE-687: Lazy skipping on proximity file speeds up most\r
+     queries involving term positions, including phrase queries.\r
+     (Michael Busch via Yonik Seeley)\r
+\r
+ 12. LUCENE-714: Replaced 2 cases of manual for-loop array copying\r
+     with calls to System.arraycopy instead, in DocumentWriter.java.\r
+     (Nicolas Lalevee via Mike McCandless)\r
+\r
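+     The rewrite replaces hand-rolled copy loops of this shape (variable\r
+     names are illustrative) with the JVM's bulk intrinsic:\r
+\r
+       // before: one element per iteration\r
+       for (int i = 0; i < length; i++)\r
+         dest[i] = src[i];\r
+\r
+       // after: a single bulk copy\r
+       System.arraycopy(src, 0, dest, 0, length);\r
+\r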
+ 13. LUCENE-729: Non-recursive skipTo and next implementation of\r
+     TermDocs for a MultiReader.  The old implementation could\r
+     recurse up to the number of segments in the index. (Yonik Seeley)\r
+\r
+ 14. LUCENE-739: Improve segment merging performance by reusing\r
+     the norm array across different fields and doing bulk writes\r
+     of norms of segments with no deleted docs.\r
+     (Michael Busch via Yonik Seeley)\r
+\r
+ 15. LUCENE-745: Add BooleanQuery.clauses(), allowing direct access\r
+     to the List of clauses and replaced the internal synchronized Vector\r
+     with an unsynchronized List. (Yonik Seeley)\r
+\r
+ 16. LUCENE-750: Remove finalizers from FSIndexOutput and move the\r
+     FSIndexInput finalizer to the actual file so all clones don't\r
+     register a new finalizer. (Yonik Seeley)\r
+\r
+Test Cases\r
+\r
+  1. Added TestTermScorer.java (Grant Ingersoll)\r
+\r
+  2. Added TestWindowsMMap.java (Benson Margulies via Mike McCandless)\r
+\r
+  3. LUCENE-744 Append the user.name property onto the temporary directory \r
+     that is created so it doesn't interfere with other users. (Grant Ingersoll)\r
+\r
+Documentation\r
+\r
+  1. Added style sheet to xdocs named lucene.css and included in the\r
+     Anakia VSL descriptor.  (Grant Ingersoll)\r
+\r
+  2. Added scoring.xml document into xdocs.  Updated Similarity.java\r
+     scoring formula. (Grant Ingersoll and Steve Rowe.  Updates from:\r
+     Michael McCandless, Doron Cohen, Chris Hostetter, Doug Cutting).\r
+     Issue 664.\r
+\r
+  3. Added javadocs for FieldSelectorResult.java. (Grant Ingersoll)\r
+\r
+  4. Moved xdocs directory to src/site/src/documentation/content/xdocs per\r
+     Issue 707.  Site now builds using Forrest, just like the other Lucene\r
+     siblings.  See http://wiki.apache.org/jakarta-lucene/HowToUpdateTheWebsite\r
+     for info on updating the website. (Grant Ingersoll with help from Steve Rowe,\r
+     Chris Hostetter, Doug Cutting, Otis Gospodnetic, Yonik Seeley)\r
+\r
+  5. Added in Developer and System Requirements sections under Resources (Grant Ingersoll)\r
+\r
+  6. LUCENE-713 Updated the Term Vector section of File Formats to include\r
+     documentation on how Offset and Position info are stored in the TVF file.\r
+     (Grant Ingersoll, Samir Abdou)\r
+\r
+  7. Added in link to Clover Test Code Coverage Reports under the Develop\r
+     section in Resources (Grant Ingersoll)\r
+\r
+  8. LUCENE-748: Added details for semantics of IndexWriter.close on\r
+     hitting an Exception.  (Jed Wesley-Smith via Mike McCandless)\r
+\r
+  9. Added some text about what is contained in releases.\r
+     (Eric Haszlakiewicz via Grant Ingersoll)\r
+\r
+  10. LUCENE-758: Fix javadoc to clarify that RAMDirectory(Directory)\r
+      makes a full copy of the starting Directory.  (Mike McCandless)\r
+\r
+  11. LUCENE-764: Fix javadocs to detail temporary space requirements\r
+      for IndexWriter's optimize(), addIndexes(*) and addDocument(...)\r
+      methods.  (Mike McCandless)\r
+\r
+Build\r
+\r
+  1. Added in clover test code coverage per http://issues.apache.org/jira/browse/LUCENE-721\r
+     To enable clover code coverage, you must have clover.jar in the ANT\r
+     classpath and specify -Drun.clover=true on the command line.\r
+     (Michael Busch and Grant Ingersoll)\r
+\r
+  2. Added a sysproperty in common-build.xml per LUCENE-752 to map java.io.tmpdir to\r
+     ${build.dir}/test just like the tempDir sysproperty.\r
+\r
+  3. LUCENE-757 Added new target named init-dist that does setup for\r
+     distribution of both binary and source distributions.  Called by package \r
+     and package-*-src.\r
+\r
+======================= Release 2.0.0 2006-05-26 =======================\r
+\r
+API Changes\r
+\r
+ 1. All deprecated methods and fields have been removed, except\r
+    DateField, which will still be supported for some time\r
+    so Lucene can read its date fields from old indexes\r
+    (Yonik Seeley & Grant Ingersoll)\r
+\r
+ 2. DisjunctionSumScorer is no longer public.\r
+    (Paul Elschot via Otis Gospodnetic)\r
+\r
+ 3. Creating a Field with both an empty name and an empty value\r
+    now throws an IllegalArgumentException\r
+    (Daniel Naber)\r
+\r
+ 4. LUCENE-301: Added new IndexWriter({String,File,Directory},\r
+    Analyzer) constructors that do not take a boolean "create"\r
+    argument.  These new constructors will create a new index if\r
+    necessary, else append to the existing one.  (Dan Armbrust via\r
+    Mike McCandless)\r
+\r
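+    A rough illustration of the difference (path and analyzer are\r
+    arbitrary):\r
+\r
+      // old style: caller must pass create=true/false explicitly\r
+      IndexWriter w1 = new IndexWriter("/tmp/idx",\r
+                                       new StandardAnalyzer(), false);\r
+      // new style: creates the index if necessary, else appends\r
+      IndexWriter w2 = new IndexWriter("/tmp/idx",\r
+                                       new StandardAnalyzer());\r
+\r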
+New features\r
+\r
+ 1. LUCENE-496: Command line tool for modifying the field norms of an\r
+    existing index; added to contrib/miscellaneous.  (Chris Hostetter)\r
+\r
+ 2. LUCENE-577: SweetSpotSimilarity added to contrib/miscellaneous.\r
+    (Chris Hostetter)\r
+    \r
+Bug fixes\r
+\r
+ 1. LUCENE-330: Fix issue of FilteredQuery not working properly within\r
+    BooleanQuery.  (Paul Elschot via Erik Hatcher)\r
+\r
+ 2. LUCENE-515: Make ConstantScoreRangeQuery and ConstantScoreQuery work\r
+    with RemoteSearchable.  (Philippe Laflamme via Yonik Seeley)\r
+\r
+ 3. Added methods to get/set writeLockTimeout and commitLockTimeout in\r
+    IndexWriter. These could be set in Lucene 1.4 using a system property.\r
+    This feature had been removed without adding the corresponding\r
+    getter/setter methods.  (Daniel Naber)\r
+\r
+ 4. LUCENE-413: Fixed ArrayIndexOutOfBoundsException exceptions\r
+    when using SpanQueries. (Paul Elschot via Yonik Seeley)\r
+\r
+ 5. Implemented FilterIndexReader.getVersion() and isCurrent()\r
+    (Yonik Seeley)\r
+\r
+ 6. LUCENE-540: Fixed a bug with IndexWriter.addIndexes(Directory[])\r
+    that sometimes caused the index order of documents to change.\r
+    (Yonik Seeley)\r
+\r
+ 7. LUCENE-526: Fixed a bug in FieldSortedHitQueue that caused\r
+    subsequent String sorts with different locales to sort identically.\r
+    (Paul Cowan via Yonik Seeley)\r
+\r
+ 8. LUCENE-541: Add missing extractTerms() to DisjunctionMaxQuery\r
+    (Stefan Will via Yonik Seeley)\r
+\r
+ 9. LUCENE-514: Added getTermArrays() and extractTerms() to\r
+    MultiPhraseQuery (Eric Jain & Yonik Seeley)\r
+\r
+10. LUCENE-512: Fixed ClassCastException in ParallelReader.getTermFreqVectors\r
+    (frederic via Yonik)\r
+\r
+11. LUCENE-352: Fixed bug in SpanNotQuery that manifested as\r
+    NullPointerException when "exclude" query was not a SpanTermQuery.\r
+    (Chris Hostetter)\r
+\r
+12. LUCENE-572: Fixed bug in SpanNotQuery hashCode, was ignoring exclude clause\r
+    (Chris Hostetter)\r
+\r
+13. LUCENE-561: Fixed some ParallelReader bugs. NullPointerException if the reader\r
+    didn't know about the field yet, reader didn't keep track if it had deletions,\r
+    and deleteDocument calls could circumvent synchronization on the subreaders.\r
+    (Chuck Williams via Yonik Seeley)\r
+\r
+14. LUCENE-556: Added empty extractTerms() implementation to MatchAllDocsQuery and\r
+    ConstantScoreQuery in order to allow their use with a MultiSearcher.\r
+    (Yonik Seeley)\r
+\r
+15. LUCENE-546: Removed 2GB file size limitations for RAMDirectory.\r
+    (Peter Royal, Michael Chan, Yonik Seeley)\r
+\r
+16. LUCENE-485: Don't hold commit lock while removing obsolete index\r
+    files.  (Luc Vanlerberghe via cutting)\r
+\r
+\r
+1.9.1\r
+\r
+Bug fixes\r
+\r
+ 1. LUCENE-511: Fix a bug in the BufferedIndexOutput optimization\r
+    introduced in 1.9-final.  (Shay Banon & Steven Tamm via cutting)\r
+\r
+1.9 final\r
+\r
+Note that this release is mostly but not 100% source compatible with\r
+the previous release of Lucene (1.4.3). In other words, you should\r
+make sure your application compiles with this version of Lucene before\r
+you replace the old Lucene JAR with the new one.  Many methods have\r
+been deprecated in anticipation of release 2.0, so deprecation\r
+warnings are to be expected when upgrading from 1.4.3 to 1.9.\r
+\r
+Bug fixes\r
\r
+ 1. The fix that made IndexWriter.setMaxBufferedDocs(1) work had negative \r
+    effects on indexing performance and has thus been reverted. The \r
+    argument for setMaxBufferedDocs(int) must now be at least 2, otherwise\r
+    an exception is thrown. (Daniel Naber)\r
\r
+Optimizations\r
+     \r
+ 1. Optimized BufferedIndexOutput.writeBytes() to use\r
+    System.arraycopy() in more cases, rather than copying byte-by-byte.\r
+    (Lukas Zapletal via Cutting)\r
+\r
+1.9 RC1\r
+\r
+Requirements\r
+\r
+ 1. To compile and use Lucene you now need Java 1.4 or later.\r
+\r
+Changes in runtime behavior\r
+\r
+ 1. FuzzyQuery can no longer throw a TooManyClauses exception. If a\r
+    FuzzyQuery expands to more than BooleanQuery.maxClauseCount\r
+    terms only the BooleanQuery.maxClauseCount most similar terms\r
+    go into the rewritten query and thus the exception is avoided.\r
+    (Christoph)\r
+\r
+ 2. Changed system property from "org.apache.lucene.lockdir" to\r
+    "org.apache.lucene.lockDir", so that its casing follows the existing\r
+    pattern used in other Lucene system properties. (Bernhard)\r
+\r
+ 3. The terms of RangeQueries and FuzzyQueries are now converted to\r
+    lowercase by default (as it has been the case for PrefixQueries\r
+    and WildcardQueries before). Use setLowercaseExpandedTerms(false)\r
+    to disable that behavior but note that this also affects\r
+    PrefixQueries and WildcardQueries. (Daniel Naber)\r
+\r
+ 4. Document frequency that is computed when MultiSearcher is used is now\r
+    computed correctly and "globally" across subsearchers and indices, while\r
+    before it used to be computed locally to each index, which caused\r
+    ranking across multiple indices not to be equivalent.\r
+    (Chuck Williams, Wolf Siberski via Otis, bug #31841)\r
+\r
+ 5. When opening an IndexWriter with create=true, Lucene now only deletes\r
+    its own files from the index directory (looking at the file name suffixes\r
+    to decide if a file belongs to Lucene). The old behavior was to delete\r
+    all files. (Daniel Naber and Bernhard Messer, bug #34695)\r
+\r
+ 6. The version of an IndexReader, as returned by getCurrentVersion()\r
+    and getVersion() doesn't start at 0 anymore for new indexes. Instead, it\r
+    is now initialized by the system time in milliseconds.\r
+    (Bernhard Messer via Daniel Naber)\r
+\r
+ 7. Several default values cannot be set via system properties anymore, as\r
+    this has been considered inappropriate for a library like Lucene. For\r
+    most properties there are set/get methods available in IndexWriter which\r
+    you should use instead. This affects the following properties:\r
+    See IndexWriter for getter/setter methods:\r
+      org.apache.lucene.writeLockTimeout, org.apache.lucene.commitLockTimeout,\r
+      org.apache.lucene.minMergeDocs, org.apache.lucene.maxMergeDocs,\r
+      org.apache.lucene.maxFieldLength, org.apache.lucene.termIndexInterval,\r
+      org.apache.lucene.mergeFactor,\r
+    See BooleanQuery for getter/setter methods:\r
+      org.apache.lucene.maxClauseCount\r
+    See FSDirectory for getter/setter methods:\r
+      disableLuceneLocks\r
+    (Daniel Naber)\r
+\r
+ 8. Fixed FieldCacheImpl to use user-provided IntParser and FloatParser,\r
+    instead of using Integer and Float classes for parsing.\r
+    (Yonik Seeley via Otis Gospodnetic)\r
+\r
+ 9. Expert level search routines returning TopDocs and TopFieldDocs\r
+    no longer normalize scores.  This also fixes bugs related to\r
+    MultiSearchers and score sorting/normalization.\r
+    (Luc Vanlerberghe via Yonik Seeley, LUCENE-469)\r
+\r
+New features\r
+\r
+ 1. Added support for stored compressed fields (patch #31149)\r
+    (Bernhard Messer via Christoph)\r
+\r
+ 2. Added support for binary stored fields (patch #29370)\r
+    (Drew Farris and Bernhard Messer via Christoph)\r
+\r
+ 3. Added support for position and offset information in term vectors\r
+    (patch #18927). (Grant Ingersoll & Christoph)\r
+\r
+ 4. A new class DateTools has been added. It allows you to format dates\r
+    in a readable format adequate for indexing. Unlike the existing\r
+    DateField class, DateTools can cope with dates before 1970, and it\r
+    forces you to specify the desired date resolution (e.g. month, day,\r
+    second, ...) which can make RangeQuerys on those fields more efficient.\r
+    (Daniel Naber)\r
+\r
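+    For example, assuming java.util.Date values start and end, and an\r
+    indexed field named "date" (both illustrative):\r
+\r
+      String today = DateTools.dateToString(new java.util.Date(),\r
+                         DateTools.Resolution.DAY);    // e.g. "20060526"\r
+      Query range = new RangeQuery(\r
+          new Term("date", DateTools.dateToString(start,\r
+                               DateTools.Resolution.DAY)),\r
+          new Term("date", DateTools.dateToString(end,\r
+                               DateTools.Resolution.DAY)),\r
+          true);                                       // inclusive endpoints\r
+\r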
+ 5. QueryParser now correctly works with Analyzers that can return more\r
+    than one token per position. For example, a query "+fast +car"\r
+    would be parsed as "+fast +(car automobile)" if the Analyzer\r
+    returns "car" and "automobile" at the same position whenever it\r
+    finds "car" (Patch #23307).\r
+    (Pierrick Brihaye, Daniel Naber)\r
+\r
+ 6. Permit unbuffered Directory implementations (e.g., using mmap).\r
+    InputStream is replaced by the new classes IndexInput and\r
+    BufferedIndexInput.  OutputStream is replaced by the new classes\r
+    IndexOutput and BufferedIndexOutput.  InputStream and OutputStream\r
+    are now deprecated and FSDirectory is now subclassable. (cutting)\r
+\r
+ 7. Add native Directory and TermDocs implementations that work under\r
+    GCJ.  These require GCC 3.4.0 or later and have only been tested\r
+    on Linux.  Use 'ant gcj' to build demo applications. (cutting)\r
+\r
+ 8. Add MMapDirectory, which uses nio to mmap input files.  This is\r
+    still somewhat slower than FSDirectory.  However it uses less\r
+    memory per query term, since a new buffer is not allocated per\r
+    term, which may help applications which use, e.g., wildcard\r
+    queries.  It may also someday be faster. (cutting & Paul Elschot)\r
+\r
+ 9. Added javadocs-internal to build.xml - bug #30360\r
+    (Paul Elschot via Otis)\r
+\r
+10. Added RangeFilter, a more generically useful filter than DateFilter.\r
+    (Chris M Hostetter via Erik)\r
+\r
+11. Added NumberTools, a utility class for indexing numeric fields.\r
+    (adapted from code contributed by Matt Quail; committed by Erik)\r
+\r
+12. Added public static IndexReader.main(String[] args) method.\r
+    IndexReader can now be used directly at command line level\r
+    to list and optionally extract the individual files from an existing\r
+    compound index file.\r
+    (adapted from code contributed by Garrett Rooney; committed by Bernhard)\r
+\r
+13. Add IndexWriter.setTermIndexInterval() method.  See javadocs.\r
+    (Doug Cutting)\r
+\r
+14. Added LucenePackage, whose static get() method returns java.util.Package,\r
+    which lets the caller get the Lucene version information specified in\r
+    the Lucene Jar.\r
+    (Doug Cutting via Otis)\r
+\r
+15. Added Hits.iterator() method and corresponding HitIterator and Hit objects.\r
+    This provides standard java.util.Iterator iteration over Hits.\r
+    Each call to the iterator's next() method returns a Hit object.\r
+    (Jeremy Rayner via Erik)\r
+\r
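+    Typical iteration, assuming hits came from a prior search (the cast\r
+    is needed because this API predates Java generics):\r
+\r
+      for (Iterator it = hits.iterator(); it.hasNext();) {\r
+        Hit hit = (Hit) it.next();\r
+        Document doc = hit.getDocument();   // may throw IOException\r
+      }\r
+\r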
+16. Add ParallelReader, an IndexReader that combines separate indexes\r
+    over different fields into a single virtual index.  (Doug Cutting)\r
+\r
+17. Add IntParser and FloatParser interfaces to FieldCache, so that\r
+    fields in arbitrarily formats can be cached as ints and floats.\r
+    (Doug Cutting)\r
+\r
+18. Added class org.apache.lucene.index.IndexModifier which combines\r
+    IndexWriter and IndexReader, so you can add and delete documents without\r
+    worrying about synchronization/locking issues.\r
+    (Daniel Naber)\r
+\r
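+    A rough usage sketch (path, analyzer and the doc/Term values are\r
+    illustrative):\r
+\r
+      IndexModifier mod = new IndexModifier("/tmp/idx",\r
+          new StandardAnalyzer(), false);         // false = open existing\r
+      mod.addDocument(doc);                       // no explicit locking needed\r
+      mod.deleteDocuments(new Term("id", "42"));\r
+      mod.close();\r
+\r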
+19. Lucene can now be used inside an unsigned applet, as Lucene's access\r
+    to system properties will not cause a SecurityException anymore.\r
+    (Jon Schuster via Daniel Naber, bug #34359)\r
+\r
+20. Added a new class MatchAllDocsQuery that matches all documents.\r
+    (John Wang via Daniel Naber, bug #34946)\r
+\r
+21. Added ability to omit norms on a per field basis to decrease\r
+    index size and memory consumption when there are many indexed fields.\r
+    See Field.setOmitNorms()\r
+    (Yonik Seeley, LUCENE-448)\r
+\r
+22. Added NullFragmenter to contrib/highlighter, which is useful for\r
+    highlighting entire documents or fields.\r
+    (Erik Hatcher)\r
+\r
+23. Added regular expression queries, RegexQuery and SpanRegexQuery.\r
+    Note the same term enumeration caveats apply with these queries as\r
+    apply to WildcardQuery and other term expanding queries.\r
+    These two new queries are not currently supported via QueryParser.\r
+    (Erik Hatcher)\r
+\r
+24. Added ConstantScoreQuery which wraps a filter and produces a score\r
+    equal to the query boost for every matching document.\r
+    (Yonik Seeley, LUCENE-383)\r
+\r
+25. Added ConstantScoreRangeQuery which produces a constant score for\r
+    every document in the range.  One advantage over a normal RangeQuery\r
+    is that it doesn't expand to a BooleanQuery and thus doesn't have a maximum\r
+    number of terms the range can cover.  Both endpoints may also be open.\r
+    (Yonik Seeley, LUCENE-383)\r
+\r
+26. Added ability to specify a minimum number of optional clauses that\r
+    must match in a BooleanQuery.  See BooleanQuery.setMinimumNumberShouldMatch().\r
+    (Paul Elschot, Chris Hostetter via Yonik Seeley, LUCENE-395)\r
+\r
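+    For example, to require at least two of three optional terms (field\r
+    and terms are illustrative):\r
+\r
+      BooleanQuery q = new BooleanQuery();\r
+      q.add(new TermQuery(new Term("body", "apache")),\r
+            BooleanClause.Occur.SHOULD);\r
+      q.add(new TermQuery(new Term("body", "lucene")),\r
+            BooleanClause.Occur.SHOULD);\r
+      q.add(new TermQuery(new Term("body", "search")),\r
+            BooleanClause.Occur.SHOULD);\r
+      q.setMinimumNumberShouldMatch(2);   // must match >= 2 SHOULD clauses\r
+\r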
+27. Added DisjunctionMaxQuery which provides the maximum score across its clauses.\r
+    It's very useful for searching across multiple fields.\r
+    (Chuck Williams via Yonik Seeley, LUCENE-323)\r
+\r
+28. New class ISOLatin1AccentFilter that replaces accented characters in the ISO\r
+    Latin 1 character set by their unaccented equivalent.\r
+    (Sven Duzont via Erik Hatcher)\r
+\r
+29. New class KeywordAnalyzer. "Tokenizes" the entire stream as a single token.\r
+    This is useful for data like zip codes, ids, and some product names.\r
+    (Erik Hatcher)\r
+\r
+30. Copied LengthFilter from contrib area to core. Removes words that are too\r
+    long and too short from the stream.\r
+    (David Spencer via Otis and Daniel)\r
+\r
+31. Added getPositionIncrementGap(String fieldName) to Analyzer.  This allows\r
+    custom analyzers to put gaps between Field instances with the same field\r
+    name, preventing phrase or span queries crossing these boundaries.  The\r
+    default implementation issues a gap of 0, allowing the default token\r
+    position increment of 1 to put the next field's first token into a\r
+    successive position.\r
+    (Erik Hatcher, with advice from Yonik)\r
+\r
+32. StopFilter can now ignore case when checking for stop words.\r
+    (Grant Ingersoll via Yonik, LUCENE-248)\r
+\r
+33. Add TopDocCollector and TopFieldDocCollector.  These simplify the\r
+    implementation of hit collectors that collect only the\r
+    top-scoring or top-sorting hits.\r
+\r
+API Changes\r
+\r
+ 1. Several methods and fields have been deprecated. The API documentation\r
+    contains information about the recommended replacements. It is planned\r
+    that most of the deprecated methods and fields will be removed in\r
+    Lucene 2.0. (Daniel Naber)\r
+\r
+ 2. The Russian and the German analyzers have been moved to contrib/analyzers.\r
+    Also, the WordlistLoader class has been moved one level up in the\r
+    hierarchy and is now org.apache.lucene.analysis.WordlistLoader\r
+    (Daniel Naber)\r
+\r
+ 3. The API contained methods that were declared to throw an IOException\r
+    but never actually did. These declarations have been removed. If\r
+    your code tries to catch these exceptions you might need to remove\r
+    those catch clauses to avoid compile errors. (Daniel Naber)\r
+\r
+ 4. Add a serializable Parameter Class to standardize parameter enum\r
+    classes in BooleanClause and Field. (Christoph)\r
+\r
+ 5. Added rewrite methods to all SpanQuery subclasses that nest other SpanQuerys.\r
+    This allows custom SpanQuery subclasses that rewrite (for term expansion, for\r
+    example) to nest within the built-in SpanQuery classes successfully.\r
+\r
+Bug fixes\r
+\r
+ 1. The JSP demo page (src/jsp/results.jsp) now properly closes the\r
+    IndexSearcher it opens. (Daniel Naber)\r
+\r
+ 2. Fixed a bug in IndexWriter.addIndexes(IndexReader[] readers) that\r
+    prevented deletion of obsolete segments. (Christoph Goller)\r
+\r
+ 3. Fix in FieldInfos to avoid the return of an extra blank field in\r
+    IndexReader.getFieldNames() (Patch #19058). (Mark Harwood via Bernhard)\r
+\r
+ 4. Some combinations of BooleanQuery and MultiPhraseQuery (formerly\r
+    PhrasePrefixQuery) could provoke UnsupportedOperationException\r
+    (bug #33161). (Rhett Sutphin via Daniel Naber)\r
+\r
+ 5. Small bug in skipTo of ConjunctionScorer that caused NullPointerException\r
+    if skipTo() was called without prior call to next() fixed. (Christoph)\r
+\r
+ 6. Disable Similarity.coord() in the scoring of most automatically\r
+    generated boolean queries.  The coord() score factor is\r
+    appropriate when clauses are independently specified by a user,\r
+    but is usually not appropriate when clauses are generated\r
+    automatically, e.g., by a fuzzy, wildcard or range query.  Matches\r
+    on such automatically generated queries are no longer penalized\r
+    for not matching all terms.  (Doug Cutting, Patch #33472)\r
+\r
+ 7. Getting a lock file with Lock.obtain(long) was supposed to wait for\r
+    a given amount of milliseconds, but this didn't work.\r
+    (John Wang via Daniel Naber, Bug #33799)\r
+\r
+ 8. Fix FSDirectory.createOutput() to always create new files.\r
+    Previously, existing files were overwritten, and an index could be\r
+    corrupted when the old version of a file was longer than the new.\r
+    Now any existing file is first removed.  (Doug Cutting)\r
+\r
+ 9. Fix BooleanQuery containing nested SpanTermQuery's, which previously\r
+    could return an incorrect number of hits.\r
+    (Reece Wilton via Erik Hatcher, Bug #35157)\r
+\r
+10. Fix NullPointerException that could occur with a MultiPhraseQuery\r
+    inside a BooleanQuery.\r
+    (Hans Hjelm and Scotty Allen via Daniel Naber, Bug #35626)\r
+\r
+11. Fixed SnowballFilter to pass through the position increment from\r
+    the original token.\r
+    (Yonik Seeley via Erik Hatcher, LUCENE-437)\r
+\r
+12. Added Unicode range of Korean characters to StandardTokenizer,\r
+    grouping contiguous characters into a token rather than one token\r
+    per character.  This change also changes the token type to "<CJ>"\r
+    for Chinese and Japanese character tokens (previously it was "<CJK>").\r
+    (Cheolgoo Kang via Otis and Erik, LUCENE-444 and LUCENE-461)\r
+\r
+13. FieldsReader now looks at FieldInfo.storeOffsetWithTermVector and\r
+    FieldInfo.storePositionWithTermVector and creates the Field with\r
+    correct TermVector parameter.\r
+    (Frank Steinmann via Bernhard, LUCENE-455)\r
+\r
+14. Fixed WildcardQuery to prevent "cat" matching "ca??".\r
+    (Xiaozheng Ma via Bernhard, LUCENE-306)\r
+\r
+15. Fixed a bug where MultiSearcher and ParallelMultiSearcher could\r
+    change the sort order when sorting by string for documents without\r
+    a value for the sort field.\r
+    (Luc Vanlerberghe via Yonik, LUCENE-453)\r
+\r
+16. Fixed a sorting problem with MultiSearchers that can lead to\r
+    missing or duplicate docs due to equal docs sorting in an arbitrary order.\r
+    (Yonik Seeley, LUCENE-456)\r
+\r
+17. A single hit using the expert level sorted search methods\r
+    resulted in the score not being normalized.\r
+    (Yonik Seeley, LUCENE-462)\r
+\r
+18. Fixed inefficient memory usage when loading an index into RAMDirectory.\r
+    (Volodymyr Bychkoviak via Bernhard, LUCENE-475)\r
+\r
+19. Corrected term offsets returned by ChineseTokenizer.\r
+    (Ray Tsang via Erik Hatcher, LUCENE-324)\r
+\r
+20. Fixed MultiReader.undeleteAll() to correctly update numDocs.\r
+    (Robert Kirchgessner via Doug Cutting, LUCENE-479)\r
+\r
+21. Race condition in IndexReader.getCurrentVersion() and isCurrent()\r
+    fixed by acquiring the commit lock.\r
+    (Luc Vanlerberghe via Yonik Seeley, LUCENE-481)\r
+\r
+22. IndexWriter.setMaxBufferedDocs(1) didn't have the expected effect,\r
+    this has now been fixed. (Daniel Naber)\r
+\r
+23. Fixed QueryParser when called with a date in local form like \r
+    "[1/16/2000 TO 1/18/2000]". This query did not include the documents\r
+    of 1/18/2000, i.e. the last day was not included. (Daniel Naber)\r
+\r
+24. Removed sorting constraint that threw an exception if there were\r
+    not yet any values for the sort field (Yonik Seeley, LUCENE-374)\r
+\r
+Optimizations\r
+     \r
+ 1. Disk usage (peak requirements during indexing and optimization)\r
+    in case of compound file format has been improved. \r
+    (Bernhard, Dmitry, and Christoph)\r
+\r
+ 2. Optimize the performance of certain uses of BooleanScorer,\r
+    TermScorer and IndexSearcher.  In particular, a BooleanQuery\r
+    composed of TermQuery, with not all terms required, that returns a\r
+    TopDocs (e.g., through a Hits with no Sort specified) runs much\r
+    faster.  (cutting)\r
+    \r
+ 3. Removed synchronization from reading of term vectors with an\r
+    IndexReader (Patch #30736). (Bernhard Messer via Christoph)\r
+\r
+ 4. Optimize term-dictionary lookup to allocate far fewer terms when\r
+    scanning for the matching term.  This speeds searches involving\r
+    low-frequency terms, where the cost of dictionary lookup can be\r
+    significant. (cutting)\r
+\r
+ 5. Optimize fuzzy queries so the standard fuzzy queries with a prefix \r
+    of 0 now run 20-50% faster (Patch #31882).\r
+    (Jonathan Hager via Daniel Naber)\r
+    \r
+ 6. A new version of BooleanScorer (BooleanScorer2) has been added that\r
+    delivers documents in increasing order and implements skipTo. For\r
+    queries with required or forbidden clauses it may be faster than the\r
+    old BooleanScorer; for BooleanQueries consisting only of optional\r
+    clauses it is probably slower. The new BooleanScorer is now the\r
+    default. (Patch 31785 by Paul Elschot via Christoph)\r
+\r
+ 7. Use uncached access to norms when merging to reduce RAM usage.\r
+    (Bug #32847).  (Doug Cutting)\r
+\r
+ 8. Don't read term index when random-access is not required.  This\r
+    reduces time to open IndexReaders and they use less memory when\r
+    random access is not required, e.g., when merging segments.  The\r
+    term index is now read into memory lazily at the first\r
+    random-access.  (Doug Cutting)\r
+\r
+ 9. Optimize IndexWriter.addIndexes(Directory[]) when the number of\r
+    added indexes is larger than mergeFactor.  Previously this could\r
+    result in quadratic performance.  Now performance is n log(n).\r
+    (Doug Cutting)\r
+\r
+10. Speed up the creation of TermEnum for indices with multiple\r
+    segments and deleted documents, and thus speed up PrefixQuery,\r
+    RangeQuery, WildcardQuery, FuzzyQuery, RangeFilter, DateFilter,\r
+    and sorting the first time on a field.\r
+    (Yonik Seeley, LUCENE-454)\r
+\r
+11. Optimized and generalized 32 bit floating point to byte\r
+    (custom 8 bit floating point) conversions.  Increased the speed of\r
+    Similarity.encodeNorm() anywhere from 10% to 250%, depending on the JVM.\r
+    (Yonik Seeley, LUCENE-467)\r
+\r
+Infrastructure\r
+\r
+ 1. Lucene's source code repository has converted from CVS to\r
+    Subversion.  The new repository is at\r
+    http://svn.apache.org/repos/asf/lucene/java/trunk\r
+\r
+ 2. Lucene's issue tracker has migrated from Bugzilla to JIRA.\r
+    Lucene's JIRA is at http://issues.apache.org/jira/browse/LUCENE\r
+    The old issues are still available at\r
+    http://issues.apache.org/bugzilla/show_bug.cgi?id=xxxx\r
+    (use the bug number instead of xxxx)\r
+\r
+\r
+1.4.3\r
+\r
+ 1. The JSP demo page (src/jsp/results.jsp) now properly escapes error\r
+    messages which might contain user input (e.g. error messages about \r
+    query parsing). If you used that page as a starting point for your\r
+    own code please make sure your code also properly escapes HTML\r
+    characters from user input in order to avoid so-called cross site\r
+    scripting attacks. (Daniel Naber)\r
+  \r
+  2. QueryParser changes in 1.4.2 broke the QueryParser API. Now the old \r
+     API is supported again. (Christoph)\r
+\r
+\r
+1.4.2\r
+\r
+ 1. Fixed bug #31241: Sorting could lead to incorrect results (documents\r
+    missing, others duplicated) if the sort keys were not unique and there\r
+    were more than 100 matches. (Daniel Naber)\r
+\r
+ 2. Memory leak in Sort code (bug #31240) eliminated.\r
+    (Rafal Krzewski via Christoph and Daniel)\r
+    \r
+ 3. FuzzyQuery now takes an additional parameter that specifies the\r
+    minimum similarity that is required for a term to match the query.\r
+    The QueryParser syntax for this is term~x, where x is a floating \r
+    point number >= 0 and < 1 (a bigger number means that a higher\r
+    similarity is required). Furthermore, a prefix can be specified\r
+    for FuzzyQuerys so that only those terms are considered similar that \r
+    start with this prefix. This can speed up FuzzyQuery greatly.\r
+    (Daniel Naber, Christoph Goller)\r
+    \r
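+    For example (field, analyzer and thresholds are illustrative;\r
+    parse() throws ParseException):\r
+\r
+      // terms at least 70% similar to "lucene" sharing the prefix "lu"\r
+      Query q1 = new FuzzyQuery(new Term("title", "lucene"), 0.7f, 2);\r
+      // the same similarity threshold via QueryParser syntax\r
+      Query q2 = new QueryParser("title", new StandardAnalyzer())\r
+                     .parse("lucene~0.7");\r
+\r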
+ 4. PhraseQuery and PhrasePrefixQuery now allow the explicit specification\r
+    of relative positions. (Christoph Goller)\r
+    \r
+ 5. QueryParser changes: Fix for ArrayIndexOutOfBoundsExceptions \r
+    (patch #9110); some unused method parameters removed; The ability\r
+    to specify a minimum similarity for FuzzyQuery has been added.\r
+    (Christoph Goller)\r
+\r
+ 6. IndexSearcher optimization: a new ScoreDoc is no longer allocated\r
+    for every non-zero-scoring hit.  This makes 'OR' queries that\r
+    contain common terms substantially faster.  (cutting)\r
+\r
+\r
+1.4.1\r
+\r
+ 1. Fixed a performance bug in hit sorting code, where values were not\r
+    correctly cached.  (Aviran via cutting)\r
+\r
+ 2. Fixed errors in file format documentation. (Daniel Naber)\r
+\r
+\r
+1.4 final\r
+\r
+ 1. Added "an" to the list of stop words in StopAnalyzer, to complement\r
+    the existing "a" there.  Fix for bug 28960\r
+     (http://issues.apache.org/bugzilla/show_bug.cgi?id=28960). (Otis)\r
+\r
+ 2. Added new class FieldCache to manage in-memory caches of field term\r
+    values.  (Tim Jones)\r
+\r
+ 3. Added overloaded getFieldQuery method to QueryParser which\r
+    accepts the slop factor specified for the phrase (or the default\r
+    phrase slop for the QueryParser instance).  This allows overriding\r
+    methods to replace a PhraseQuery with a SpanNearQuery instead,\r
+    keeping the proper slop factor. (Erik Hatcher)\r
+\r
+ 4. Changed the encoding of GermanAnalyzer.java and GermanStemmer.java to\r
+    UTF-8 and changed the build encoding to UTF-8, to make changed files\r
+    compile. (Otis Gospodnetic)\r
+\r
+ 5. Removed synchronization from term lookup under IndexReader methods\r
+    termFreq(), termDocs() or termPositions() to improve\r
+    multi-threaded performance.  (cutting)\r
+\r
+ 6. Fix a bug where obsolete segment files were not deleted on Win32.\r
+\r
+\r
+1.4 RC3\r
+\r
+ 1. Fixed several search bugs introduced by the skipTo() changes in\r
+    release 1.4RC1.  The index file format was changed a bit, so\r
+    collections must be re-indexed to take advantage of the skipTo()\r
+    optimizations.  (Christoph Goller)\r
+\r
+ 2. Added new Document methods, removeField() and removeFields().\r
+    (Christoph Goller)\r
+\r
+ 3. Fixed inconsistencies with index closing.  Indexes and directories\r
+    are now only closed automatically by Lucene when Lucene opened\r
+    them automatically.  (Christoph Goller)\r
+\r
+ 4. Added new class: FilteredQuery.  (Tim Jones)\r
+\r
+ 5. Added a new SortField type for custom comparators.  (Tim Jones)\r
+\r
+ 6. Lock obtain timed out message now displays the full path to the lock\r
+    file. (Daniel Naber via Erik)\r
+\r
+ 7. Fixed a bug in SpanNearQuery when ordered. (Paul Elschot via cutting)\r
+\r
+ 8. Fixed so that FSDirectory's locks still work when the\r
+    java.io.tmpdir system property is null.  (cutting)\r
+\r
+ 9. Changed FilteredTermEnum's constructor to take no parameters,\r
+    as the parameters were ignored anyway (bug #28858)\r
+\r
+1.4 RC2\r
+\r
+ 1. GermanAnalyzer now throws an exception if the stopword file\r
+    cannot be found (bug #27987). It now uses LowerCaseFilter\r
+    (bug #18410) (Daniel Naber via Otis, Erik)\r
+\r
+ 2. Fixed a few bugs in the file format documentation. (cutting)\r
+\r
+\r
+1.4 RC1\r
+\r
+ 1. Changed the format of the .tis file, so that:\r
+\r
+    - it has a format version number, which makes it easier to\r
+      back-compatibly change file formats in the future.\r
+\r
+    - the term count is now stored as a long.  This was the one aspect\r
+      of the Lucene's file formats which limited index size.\r
+\r
+    - a few internal index parameters are now stored in the index, so\r
+      that they can (in theory) now be changed from index to index,\r
+      although there is not yet an API to do so.\r
+\r
+    These changes are back compatible.  The new code can read old\r
+    indexes.  But old code will not be able to read new indexes. (cutting)\r
+\r
+ 2. Added an optimized implementation of TermDocs.skipTo().  A skip\r
+    table is now stored for each term in the .frq file.  This only\r
+    adds a percent or two to overall index size, but can substantially\r
+    speedup many searches.  (cutting)\r
+\r
+ 3. Restructured the Scorer API and all Scorer implementations to take\r
+    advantage of an optimized TermDocs.skipTo() implementation.  In\r
+    particular, PhraseQuerys and conjunctive BooleanQuerys are\r
+    faster when one clause has substantially fewer matches than the\r
+    others.  (A conjunctive BooleanQuery is a BooleanQuery where all\r
+    clauses are required.)  (cutting)\r
+\r
+ 4. Added new class ParallelMultiSearcher.  Combined with\r
+    RemoteSearchable this makes it easy to implement distributed\r
+    search systems.  (Jean-Francois Halleux via cutting)\r
+\r
+ 5. Added support for hit sorting.  Results may now be sorted by any\r
+    indexed field.  For details see the javadoc for\r
+    Searcher#search(Query, Sort).  (Tim Jones via Cutting)\r
+\r
+ 6. Changed FSDirectory to auto-create a full directory tree that it\r
+    needs by using mkdirs() instead of mkdir().  (Mladen Turk via Otis)\r
+\r
+ 7. Added a new span-based query API.  This implements, among other\r
+    things, nested phrases.  See javadocs for details.  (Doug Cutting)\r
+\r
+ 8. Added new method Query.getSimilarity(Searcher), and changed\r
+    scorers to use it.  This permits one to subclass a Query class so\r
+    that it can specify its own Similarity implementation, perhaps\r
+    one that delegates through that of the Searcher.  (Julien Nioche\r
+    via Cutting)\r
+\r
+ 9. Added MultiReader, an IndexReader that combines multiple other\r
+    IndexReaders.  (Cutting)\r
+\r
+10. Added support for term vectors.  See Field#isTermVectorStored().\r
+    (Grant Ingersoll, Cutting & Dmitry)\r
+\r
+11. Fixed the old bug with escaping of special characters in query\r
+    strings: http://issues.apache.org/bugzilla/show_bug.cgi?id=24665\r
+    (Jean-Francois Halleux via Otis)\r
+\r
+12. Added support for overriding default values for the following,\r
+    using system properties:\r
+      - default commit lock timeout\r
+      - default maxFieldLength\r
+      - default maxMergeDocs\r
+      - default mergeFactor\r
+      - default minMergeDocs\r
+      - default write lock timeout\r
+    (Otis)\r
+\r
+13. Changed QueryParser.jj to allow '-' and '+' within tokens:\r
+    http://issues.apache.org/bugzilla/show_bug.cgi?id=27491\r
+    (Morus Walter via Otis)\r
+\r
+14. Changed so that the compound index format is used by default.\r
+    This makes indexing a bit slower, but vastly reduces the chances\r
+    of file handle problems.  (Cutting)\r
+\r
+\r
+1.3 final\r
+\r
+ 1. Added catch of BooleanQuery$TooManyClauses in QueryParser to\r
+    throw ParseException instead. (Erik Hatcher)\r
+\r
+ 2. Fixed a NullPointerException in Query.explain(). (Doug Cutting)\r
+\r
+ 3. Added a new method IndexReader.setNorm(), that permits one to\r
+    alter the boosting of fields after an index is created.\r
+\r
+ 4. Distinguish between the final position and length when indexing a\r
+    field.  The length is now defined as the total number of tokens,\r
+    instead of the final position, as it was previously.  Length is\r
+    used for score normalization (Similarity.lengthNorm()) and for\r
+    controlling memory usage (IndexWriter.maxFieldLength).  In both of\r
+    these cases, the total number of tokens is a better value to use\r
+    than the final token position.  Position is used in phrase\r
+    searching (see PhraseQuery and Token.setPositionIncrement()).\r
+\r
+ 5. Fix StandardTokenizer's handling of CJK characters (Chinese,\r
+    Japanese and Korean ideograms).  Previously contiguous sequences\r
+    were combined in a single token, which is not very useful.  Now\r
+    each ideogram generates a separate token, which is more useful.\r
+\r
+\r
+1.3 RC3\r
+\r
+ 1. Added minMergeDocs in IndexWriter.  This can be raised to speed\r
+    indexing without altering the number of files, but only using more\r
+    memory.  (Julien Nioche via Otis)\r
+\r
+ 2. Fix bug #24786, in query rewriting. (bschneeman via Cutting)\r
+\r
+ 3. Fix bug #16952, in demo HTML parser, skip comments in\r
+    javascript. (Christoph Goller)\r
+\r
+ 4. Fix bug #19253, in demo HTML parser, add whitespace as needed to\r
+    output (Daniel Naber via Christoph Goller)\r
+\r
+ 5. Fix bug #24301, in demo HTML parser, long titles no longer\r
+    hang things. (Christoph Goller)\r
+\r
+ 6. Fix bug #23534, Replace use of file timestamp of segments file\r
+    with an index version number stored in the segments file.  This\r
+    resolves problems when running on file systems with low-resolution\r
+    timestamps, e.g., HFS under MacOS X.  (Christoph Goller)\r
+\r
+ 7. Fix QueryParser so that TokenMgrError is not thrown, only\r
+    ParseException.  (Erik Hatcher)\r
+\r
+ 8. Fix some bugs introduced by change 11 of RC2.  (Christoph Goller)\r
+\r
+ 9. Fixed a problem compiling TestRussianStem.  (Christoph Goller)\r
+\r
+10. Cleaned up some build stuff.  (Erik Hatcher)\r
+\r
+\r
+1.3 RC2\r
+\r
+ 1. Added getFieldNames(boolean) to IndexReader, SegmentReader, and\r
+    SegmentsReader. (Julien Nioche via otis)\r
+\r
+ 2. Changed file locking to place lock files in\r
+    System.getProperty("java.io.tmpdir"), where all users are\r
+    permitted to write files.  This way folks can open and correctly\r
+    lock indexes which are read-only to them.\r
+\r
+ 3. IndexWriter: added a new method, addDocument(Document, Analyzer),\r
+    permitting one to easily use different analyzers for different\r
+    documents in the same index.\r
+\r
+ 4. Minor enhancements to FuzzyTermEnum.\r
+    (Christoph Goller via Otis)\r
+\r
+ 5. PriorityQueue: added insert(Object) method and adjusted IndexSearcher\r
+    and MultiIndexSearcher to use it.\r
+    (Christoph Goller via Otis)\r
+\r
+ 6. Fixed a bug in IndexWriter that returned incorrect docCount().\r
+    (Christoph Goller via Otis)\r
+\r
+ 7. Fixed SegmentsReader to eliminate the confusing and slightly different\r
+    behaviour of TermEnum when dealing with an enumeration of all terms,\r
+    versus an enumeration starting from a specific term.\r
+    This patch also fixes incorrect term document frequencies when the same term\r
+    is present in multiple segments.\r
+    (Christoph Goller via Otis)\r
+\r
+ 8. Added CachingWrapperFilter and PerFieldAnalyzerWrapper. (Erik Hatcher)\r
+\r
+ 9. Added support for the new "compound file" index format (Dmitry\r
+    Serebrennikov)\r
+\r
+10. Added Locale setting to QueryParser, for use by date range parsing.\r
+\r
+11. Changed IndexReader so that it can be subclassed by classes\r
+    outside of its package.  Previously it had package-private\r
+    abstract methods.  Also modified the index merging code so that it\r
+    can work on an arbitrary IndexReader implementation, and added a\r
+    new method, IndexWriter.addIndexes(IndexReader[]), to take\r
+    advantage of this. (cutting)\r
+\r
+12. Added a limit to the number of clauses which may be added to a\r
+    BooleanQuery.  The default limit is 1024 clauses.  This should\r
+    stop most OutOfMemoryExceptions caused by prefix, wildcard and fuzzy\r
+    queries which run amok. (cutting)\r
+\r
+13. Add new method: IndexReader.undeleteAll().  This undeletes all\r
+    deleted documents which still remain in the index. (cutting)\r
+\r
+\r
+1.3 RC1\r
+\r
+ 1. Fixed PriorityQueue's clear() method.\r
+    Fix for bug 9454, http://nagoya.apache.org/bugzilla/show_bug.cgi?id=9454\r
+    (Matthijs Bomhoff via otis)\r
+\r
+ 2. Changed StandardTokenizer.jj grammar for EMAIL tokens.\r
+    Fix for bug 9015, http://nagoya.apache.org/bugzilla/show_bug.cgi?id=9015\r
+    (Dale Anson via otis)\r
+\r
+ 3. Added the ability to disable lock creation by using disableLuceneLocks\r
+    system property.  This is useful for read-only media, such as CD-ROMs.\r
+    (otis)\r
+\r
+ 4. Added id method to Hits to be able to access the index global id.\r
+    Required for sorting options.\r
+    (carlson)\r
+\r
+ 5. Added support for new range query syntax to QueryParser.jj.\r
+    (briangoetz)\r
+\r
+ 6. Added the ability to retrieve HTML documents' META tag values to\r
+    HTMLParser.jj.\r
+    (Mark Harwood via otis)\r
+\r
+ 7. Modified QueryParser to make it possible to programmatically specify the\r
+    default Boolean operator (OR or AND).\r
+    (Péter Halácsy via otis)\r
+\r
+ 8. Made many search methods and classes non-final, per requests.\r
+    This includes IndexWriter and IndexSearcher, among others.\r
+    (cutting)\r
+\r
+ 9. Added class RemoteSearchable, providing support for remote\r
+    searching via RMI.  The test class RemoteSearchableTest.java\r
+    provides an example of how this can be used.  (cutting)\r
+\r
+ 10. Added PhrasePrefixQuery (and supporting MultipleTermPositions).  The\r
+     test class TestPhrasePrefixQuery provides the usage example.\r
+     (Anders Nielsen via otis)\r
+\r
+ 11. Changed the German stemming algorithm to ignore case while\r
+     stripping. The new algorithm is faster and produces more equal\r
+     stems from nouns and verbs derived from the same word.\r
+     (gschwarz)\r
+\r
+ 12. Added support for boosting the score of documents and fields via\r
+     the new methods Document.setBoost(float) and Field.setBoost(float).\r
+\r
+     Note: This changes the encoding of an indexed value.  Indexes\r
+     should be re-created from scratch in order for search scores to\r
+     be correct.  With the new code and an old index, searches will\r
+     yield very large scores for shorter fields, and very small scores\r
+     for longer fields.  Once the index is re-created, scores will be\r
+     as before. (cutting)\r
+\r
+ 13. Added new method Token.setPositionIncrement().\r
+\r
+     This permits, for the purpose of phrase searching, placing\r
+     multiple terms in a single position.  This is useful with\r
+     stemmers that produce multiple possible stems for a word.\r
+\r
+     This also permits the introduction of gaps between terms, so that\r
+     terms which are adjacent in a token stream will not be matched by\r
+     an exact phrase query.  This makes it possible, e.g., to build\r
+     an analyzer where phrases are not matched over stop words which\r
+     have been removed.\r
+\r
+     Finally, repeating a token with an increment of zero can also be\r
+     used to boost scores of matches on that token.  (cutting)\r
+\r
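+     A sketch of a filter that stacks a synonym at the same position as\r
+     the original token (hypothetical SynonymFilter, written against the\r
+     Lucene 1.x TokenStream API):\r
+\r
+       import java.io.IOException;\r
+       import org.apache.lucene.analysis.*;\r
+\r
+       public class SynonymFilter extends TokenFilter {\r
+         private Token pending;              // synonym waiting to be emitted\r
+\r
+         public SynonymFilter(TokenStream in) { super(in); }\r
+\r
+         public Token next() throws IOException {\r
+           if (pending != null) {\r
+             Token t = pending; pending = null; return t;\r
+           }\r
+           Token t = input.next();\r
+           if (t != null && t.termText().equals("car")) {\r
+             pending = new Token("automobile", t.startOffset(), t.endOffset());\r
+             pending.setPositionIncrement(0);  // same position as "car"\r
+           }\r
+           return t;\r
+         }\r
+       }\r
+\r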
+ 14. Added new Filter class, QueryFilter.  This constrains search\r
+     results to only match those which also match a provided query.\r
+     Results are cached, so that searches after the first on the same\r
+     index using this filter are very fast.\r
+\r
+     This could be used, for example, with a RangeQuery on a formatted\r
+     date field to implement date filtering.  One could re-use a\r
+     single QueryFilter that matches, e.g., only documents modified\r
+     within the last week.  The QueryFilter and RangeQuery would only\r
+     need to be reconstructed once per day. (cutting)\r
+\r
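+     For instance, filtering by a formatted date field (field name and\r
+     values are illustrative; searcher is an open IndexSearcher):\r
+\r
+       Filter lastWeek = new QueryFilter(\r
+           new RangeQuery(new Term("modified", "20011015"),\r
+                          new Term("modified", "20011022"), true));\r
+       Hits hits = searcher.search(\r
+           new TermQuery(new Term("body", "lucene")),\r
+           lastWeek);                      // filter bits cached after first use\r
+\r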
+ 15. Added a new IndexWriter method, getAnalyzer().  This returns the\r
+     analyzer used when adding documents to this index. (cutting)\r
+\r
+ 16. Fixed a bug with IndexReader.lastModified().  Before, document\r
+     deletion did not update this.  Now it does.  (cutting)\r
+\r
+ 17. Added Russian Analyzer.\r
+     (Boris Okner via otis)\r
+\r
+ 18. Added a public, extensible scoring API.  For details, see the\r
+     javadoc for org.apache.lucene.search.Similarity.\r
+\r
+ 19. Fixed the return type of Hits.id() from float to int. (Terry Steichen via Peter).\r
+\r
+ 20. Added getFieldNames() to IndexReader and Segment(s)Reader classes.\r
+     (Peter Mularien via otis)\r
+\r
+ 21. Added getFields(String) and getValues(String) methods.\r
+     Contributed by Rasik Pandey on 2002-10-09\r
+     (Rasik Pandey via otis)\r
+\r
+ 22. Revised internal search APIs.  Changes include:\r
+\r
+       a. Queries are no longer modified during a search.  This makes\r
+       it possible, e.g., to reuse the same query instance with\r
+       multiple indexes from multiple threads.\r
+\r
+       b. Term-expanding queries (e.g. PrefixQuery, WildcardQuery,\r
+       etc.)  now work correctly with MultiSearcher, fixing bugs 12619\r
+       and 12667.\r
+\r
+       c. Boosting BooleanQuery's now works, and is supported by the\r
+       query parser (problem reported by Lee Mallabone).  Thus a query\r
+       like "(+foo +bar)^2 +baz" is now supported and equivalent to\r
+       "(+foo^2 +bar^2) +baz".\r
+\r
+       d. New method: Query.rewrite(IndexReader).  This permits a\r
+       query to re-write itself as an alternate, more primitive query.\r
+       Most of the term-expanding query classes (PrefixQuery,\r
+       WildcardQuery, etc.) are now implemented using this method.\r
+\r
+       e. New method: Searchable.explain(Query q, int doc).  This\r
+       returns an Explanation instance that describes how a particular\r
+       document is scored against a query.  An explanation can be\r
+       displayed as either plain text, with the toString() method, or\r
+       as HTML, with the toHtml() method.  Note that computing an\r
+       explanation is as expensive as executing the query over the\r
+       entire index.  This is intended to be used in developing\r
+       Similarity implementations, and, for good performance, should\r
+       not be displayed with every hit.\r
+\r
+       f. Scorer and Weight are public, not package protected.  It is now\r
+       possible for someone to write a Scorer implementation that is\r
+       not in the org.apache.lucene.search package.  This is still\r
+       fairly advanced programming, and I don't expect anyone to do\r
+       this anytime soon, but at least now it is possible.\r
+\r
+       g. Added public accessors to the primitive query classes\r
+       (TermQuery, PhraseQuery and BooleanQuery), permitting access to\r
+       their terms and clauses.\r
+\r
+     Caution: These are extensive changes and they have not yet been\r
+     tested extensively.  Bug reports are appreciated.\r
+     (cutting)\r
+\r
+ 23. Added convenience RAMDirectory constructors taking File and String\r
+     arguments, for easy FSDirectory to RAMDirectory conversion.\r
+     (otis)\r
+\r
+ 24. Added code for manual renaming of files in FSDirectory, since it\r
+     has been reported that java.io.File's renameTo(File) method sometimes\r
+     fails on Windows JVMs.\r
+     (Matt Tucker via otis)\r
+\r
+ 25. Refactored QueryParser to make it easier for people to extend it.\r
+     Added the ability to automatically lower-case Wildcard terms in\r
+     the QueryParser.\r
+     (Tatu Saloranta via otis)\r
+\r
+\r
+1.2 RC6\r
+\r
+ 1. Changed QueryParser.jj to have "?" be a special character which\r
+    allowed it to be used as a wildcard term. Updated TestWildcard\r
+    unit test also. (Ralf Hettesheimer via carlson)\r
+\r
+1.2 RC5\r
+\r
+ 1. Renamed build.properties to default.properties and updated\r
+    the BUILD.txt document to describe how to override the\r
+    default.properties settings without having to edit the file. This\r
+    brings the build process closer to Scarab's build process.\r
+    (jon)\r
+\r
+ 2. Added MultiFieldQueryParser class. (Kelvin Tan, via otis)\r
+\r
+ 3. Updated "powered by" links. (otis)\r
+\r
+ 4. Fixed instruction for setting up JavaCC - Bug #7017 (otis)\r
+\r
+ 5. Added throwing exception if FSDirectory could not create directory\r
+    - Bug #6914 (Eugene Gluzberg via otis)\r
+\r
+ 6. Update MultiSearcher, MultiFieldParse, Constants, DateFilter,\r
+    LowerCaseTokenizer javadoc (otis)\r
+\r
+ 7. Added fix to avoid NullPointerException in results.jsp\r
+    (Mark Hayes via otis)\r
+\r
+ 8. Changed Wildcard search to find 0 or more characters instead of 1 or more\r
+    (Lee Mallabone, via otis)\r
+\r
+ 9. Fixed error in offset issue in GermanStemFilter - Bug #7412\r
+    (Rodrigo Reyes, via otis)\r
+\r
+ 10. Added unit tests for wildcard search and DateFilter (otis)\r
+\r
+ 11. Allow co-existence of indexed and non-indexed fields with the same name\r
+     (cutting/casper, via otis)\r
+\r
+ 12. Add escape character to query parser.\r
+     (briangoetz)\r
+\r
+ 13. Applied a patch that ensures that searches that use DateFilter\r
+     don't throw an exception when no matches are found. (David Smiley, via\r
+     otis)\r
+\r
+ 14. Fixed bugs in DateFilter and wildcardquery unit tests. (cutting, otis, carlson)\r
+\r
+\r
+1.2 RC4\r
+\r
+ 1. Updated contributions section of website.\r
+    Add XML Document #3 implementation to Document Section.\r
+    Also added Term Highlighting to Misc Section. (carlson)\r
+\r
+ 2. Fixed NullPointerException for phrase searches containing\r
+    unindexed terms, introduced in 1.2RC3.  (cutting)\r
+\r
+ 3. Changed document deletion code to obtain the index write lock,\r
+    enforcing the fact that document addition and deletion cannot be\r
+    performed concurrently.  (cutting)\r
+\r
+ 4. Various documentation cleanups.  (otis, acoliver)\r
+\r
+ 5. Updated "powered by" links.  (cutting, jon)\r
+\r
+ 6. Fixed a bug in the GermanStemmer.  (Bernhard Messer, via otis)\r
+\r
+ 7. Changed Term and Query to implement Serializable.  (scottganyo)\r
+\r
+ 8. Fixed to never delete indexes added with IndexWriter.addIndexes().\r
+    (cutting)\r
+\r
+ 9. Upgraded to JUnit 3.7. (otis)\r
+\r
+1.2 RC3\r
+\r
+ 1. IndexWriter: fixed a bug where adding an optimized index to an\r
+    empty index failed.  This was encountered using addIndexes to copy\r
+    a RAMDirectory index to an FSDirectory.\r
+\r
+ 2. RAMDirectory: fixed a bug where RAMInputStream could not read\r
+    across more than a single buffer boundary.\r
+\r
+ 3. Fix query parser so it accepts queries with unicode characters.\r
+    (briangoetz)\r
+\r
+ 4. Fix query parser so that PrefixQuery is used in preference to\r
+    WildcardQuery when there's only an asterisk at the end of the\r
+    term.  Previously PrefixQuery would never be used.\r
+\r
+ 5. Fix tests so they compile; fix ant file so it compiles tests\r
+    properly.  Added test cases for Analyzers and PriorityQueue.\r
+\r
+ 6. Updated demos, added Getting Started documentation. (acoliver)\r
+\r
+ 7. Added 'contributions' section to website & docs. (carlson)\r
+\r
+ 8. Removed JavaCC from source distribution for copyright reasons.\r
+    Folks must now download this separately from metamata in order to\r
+    compile Lucene.  (cutting)\r
+\r
+ 9. Substantially improved the performance of DateFilter by adding the\r
+    ability to reuse TermDocs objects.  (cutting)\r
+\r
+10. Added IndexReader methods:\r
+      public static boolean indexExists(String directory);\r
+      public static boolean indexExists(File directory);\r
+      public static boolean indexExists(Directory directory);\r
+      public static boolean isLocked(Directory directory);\r
+      public static void unlock(Directory directory);\r
+    (cutting, otis)\r
+\r
+11. Fixed bugs in GermanAnalyzer (gschwarz)\r
+\r
+\r
+1.2 RC2, 19 October 2001:\r
+ - added sources to distribution\r
+ - removed broken build scripts and libraries from distribution\r
+ - SegmentsReader: fixed potential race condition\r
+ - FSDirectory: fixed so that getDirectory(xxx,true) correctly\r
+   erases the directory contents, even when the directory\r
+   has already been accessed in this JVM.\r
+ - RangeQuery: Fix issue where an inclusive range query would\r
+   include the nearest term in the index above a non-existent\r
+   specified upper term.\r
+ - SegmentTermEnum: Fix NullPointerException in clone() method\r
+   when the Term is null.\r
+ - JDK 1.1 compatibility fix: disabled lock files for JDK 1.1,\r
+   since they rely on a feature added in JDK 1.2.\r
+\r
+1.2 RC1 (first Apache release), 2 October 2001:\r
+  - packages renamed from com.lucene to org.apache.lucene\r
+  - license switched from LGPL to Apache\r
+  - ant-only build -- no more makefiles\r
+  - addition of lock files--now fully thread & process safe\r
+  - addition of German stemmer\r
+  - MultiSearcher now supports low-level search API\r
+  - added RangeQuery, for term-range searching\r
+  - Analyzers can choose tokenizer based on field name\r
+  - misc bug fixes.\r
+\r
+1.01b (last Sourceforge release), 2 July 2001\r
+ . a few bug fixes\r
+ . new Query Parser\r
+ . new prefix query (search for "foo*" matches "food")\r
+\r
+1.0, 2000-10-04\r
+\r
+This release fixes a few serious bugs and also includes some\r
+performance optimizations, a stemmer, and a few other minor\r
+enhancements.\r
+\r
+0.04 2000-04-19\r
+\r
+Lucene now includes a grammar-based tokenizer, StandardTokenizer.\r
+\r
+The only tokenizer included in the previous release (LetterTokenizer)\r
+identified terms consisting entirely of alphabetic characters.  The\r
+new tokenizer uses a regular-expression grammar to identify more\r
+complex classes of terms, including numbers, acronyms, email\r
+addresses, etc.\r
+\r
+StandardTokenizer serves two purposes:\r
+\r
+ 1. It is a much better, general-purpose tokenizer that applications\r
+    can use as is.\r
+\r
+    The easiest way for applications to start using\r
+    StandardTokenizer is to use StandardAnalyzer (a sketch follows\r
+    this list).\r
+\r
+ 2. It provides a good example of grammar-based tokenization.\r
+\r
+    If an application has special tokenization requirements, it can\r
+    implement a custom tokenizer by copying the directory containing\r
+    the new tokenizer into the application and modifying it\r
+    accordingly.\r
+\r
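+    A minimal sketch of that path (written against the C# port carried\r
+    in this tree rather than this 0.04 release; StandardAnalyzer and the\r
+    TermAttribute pattern are taken from those ported sources):\r
+\r
+      Analyzer analyzer = new StandardAnalyzer();\r
+      TokenStream ts = analyzer.TokenStream("body",\r
+          new System.IO.StringReader("acronyms like U.S.A. and 3.14"));\r
+      TermAttribute term = (TermAttribute) ts.AddAttribute(typeof(TermAttribute));\r
+      while (ts.IncrementToken())\r
+          System.Console.WriteLine(term.Term());\r
+\r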
+0.01, 2000-03-30\r
+\r
+First open source release.\r
+\r
+The code has been re-organized into a new package and directory\r
+structure for this release.  It builds OK, but has not been tested\r
+beyond that since the re-organization.\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/HISTORY.txt b/mcs/tools/monkeydoc/Lucene.Net/HISTORY.txt
new file mode 100644 (file)
index 0000000..cf441d8
--- /dev/null
@@ -0,0 +1,419 @@
+Apache Lucene.Net History\r
+-------------------------\r
+\r
+17Feb10:\r
+       - Release: Apache Lucene.Net 2.9.2 build 001 "Beta"\r
+       - Port: Lucene core and test code \r
+\r
+       \r
+17Feb10:\r
+       - Release: Apache Lucene.Net 2.9.1 build 002 "Final"\r
+               \r
+       \r
+16Nov09:\r
+       - Release: Apache Lucene.Net 2.9.1 build 001 "Beta"\r
+\r
+       \r
+03Nov09:\r
+       - Release: Apache Lucene.Net 2.9.0 build 001 "Alpha"\r
+       - Port: Test code\r
+       - Note: Not all tests are passing; this is just a port release\r
+\r
+       \r
+25Oct09:\r
+       - Release: Apache Lucene.Net 2.9.0 build 000 "Alpha"\r
+       - Port: Core Lucene code and demo code\r
+\r
+       \r
+19Aug09:\r
+       - Release: Apache Lucene.Net 2.4.0 build 002 "Final"\r
+\r
+       \r
+19Mar09:\r
+       - Release: Apache Lucene.Net.2.3.1 build 003 "Final"\r
+       - Fix: LUCENENET-106 (Lucene.NET is leaking memory)\r
+       - Fix: LUCENENET-116: Ambiguous name "text" in term class when in VB.NET\r
+       - Fix: LUCENENET-117: Bug in Lucene.Net.Index.DocumentsWriter.AppendPostings when compiled in release-mode\r
+       - Fix: LUCENENET-121: StopFilter tries to access the i-th item of a hashtable as item[i] where i is not a key.\r
+       - Fix: LUCENENET-122: Bug in function "SupportClass.Number.ToString(long number)"\r
+       - Fix: LUCENENET-128: 2 bugs in BooleanScorer2\r
+       - Fix: LUCENENET-129 "Test case TestDateSort.TestReverseDateSort fails"\r
+       - Fix: LUCENENET-130 "bug in unit test helper class Lucene.Net.Search.CheckHits"\r
+       - Fix: LUCENENET-131 "bugs in TestIndexWriter test cases"\r
+       - Fix: LUCENENET-132 "bug in DocumentsWriter.BalanceRAM()"\r
+       - Fix: LUCENENET-133 "bug in IndexWriter.RegisterMerge()"\r
+       - Fix: LUCENENET-134 "broken test cases in unit test class TestIndexWriterDelete.cs"\r
+       - Fix: LUCENENET-138 "IndexReader.IndexExists() always returns false"\r
+       - Fix: LUCENENET-139 "Bugs in test codes (remote searching) stemming from opening the same port twice or registering an already registered http channel"\r
+       - Fix: LUCENENET-140 "System.Single.Parse --> SupportClass.Single.Parse"\r
+       - Fix: TestStressIndexing2.patch of LUCENENET-143 (NUnit test for Index/TestStressIndexing2)\r
+       - Fix: LUCENENET-146 (BUG in segmenttermpositionvector.cs (similar to LUCENENET-145))\r
+       - Fix: LUCENENET-150 (DocumentsWriter.ReusableStringReader does not implement some methods of StringReader)\r
+       - Fix: LUCENENET-151 (Bug in Lucene.Net.Search.MultiPhraseQuery.ExtractTerms(System.Collections.Hashtable terms))\r
+       - Fix: LUCENENET-152 (Nunit test for TestStressIndexAndSearching & TestStressLocks)\r
+       - Fix: LUCENENET-154 (Lucene.Net.Index.TestIndexWriterLockRelease: Unit test fails in tear down if directory does not exist)\r
+       - Fix: LUCENENET-155 (SetUp bug in 3 unit tests)\r
+       - Fix: LUCENENET-157 (SegmentMerger.AddIndexed expects *Reader.GetFieldNames to return a 1-dimensional ICollection)\r
+       - Fix: LUCENENET-158 (TestSpanNearOrdered02 - TestSpanNearOrdered05 fail)\r
+       - Fix: LUCENENET-159 (Lucene.Net.Search.Spans.SpanOrQuery)\r
+       - Fix: LUCENENET-160 (A small performance improvement in ExtendedFieldCacheImpl)\r
+       - Fix: LUCENENET-163 (Platform dependent path handling)\r
+       - Fix: LUCENENET-168 Sporadic failures in TestRemoteSearchable.cs\r
+       - Fix: LUCENENET-170 (BooleanClause serialization fails owing to issues with serializing Occur object)\r
+       - Fix: LUCENENET-174 RAMDirectory Not Correctly Serializing\r
+\r
+\r
+15Jul08:\r
+       - Release:  Apache Lucene.Net.2.3.1 build 002 "Beta"\r
+       - Port: Ported the "Test" code from Java to C#.\r
+       - Fix: A number of issues in Lucene.Net Core code -- Thanks to Doug Sale\r
+       - Fix: A number of issues in Lucene.Net Test code -- Thanks to Doug Sale\r
+\r
+\r
+24Jun08:\r
+       - Dev Release:  Apache Lucene.Net.2.3.1 build 001 "Alpha"\r
+       - Port: Ported the "Core" and "Demo" code from Java to C#.\r
+       - Issues: Test code is not released yet.\r
+       - Note: Requires Visual Studio 2005 or later and .NET 2.0 or later.\r
+       - Note: There is no 2.3.0 or 2.2 release (to correspond with the Java Lucene release).\r
+\r
+\r
+10Dec07:\r
+       - Release:  Apache Lucene.Net.2.1 build 003 "Release Candidate"\r
+       - Fix: LUCENENET-55 "Documents.DateTools has issue creating a Date in StringToDate()"\r
+       - Fix: LUCENENET-56 "Incorrect file in TestLockFactory.RmDir()"\r
+       - Fix: LUCENENET-57 "DocHelper in Tests not creating UTF8 Cleanly"\r
+       - Fix: LUCENENET-58 "Issue in CheckHits c# doesn't perform an Assert against a hashtable"\r
+       - Fix: LUCENENET-59 "QueryUtils has some invalid Asserts"\r
+       - Fix: LUCENENET-61 "Issue testing Backwards Compatibility"\r
+       - Fix: LUCENENET-62 "IndexReader.IndexExists() Fails if directory doesn't exists."\r
+       - Fix: LUCENENET-63 "FieldCacheImpl tries to parse a float in f format"\r
+       - Fix: LUCENENET-64 "TestDateFilter incorrectly gets total milliseconds"\r
+       - Fix: LUCENENET-65 "Test case "TestSerializable" uses the stream after closing"\r
+       - Fix: LUCENENET-66 "TestMergeAfterCopy fails in IndexFileDeleter"\r
+       - Fix: LUCENENET-67 "Bug in TestIndexWriter.TestAddIndexOnDiskFull"\r
+       - Fix: LUCENENET-68 "Bug in TestIndexWriterDelete.TestOperationsOnDiskFull"\r
+       - Fix: LUCENENET-69 "FSIndexInput.isFDValid() not ported correctly"\r
+       - Fix: LUCENENET-70 "TestParallelReader.TestDocument fails because of bug in "ParallelReader.Document(int n, FieldSelector fieldSelector)" method"\r
+       - Fix: LUCENENET-71 "TestParallelTermEnum.Test1 fails because of bug in "ParallelReader.Next" method"\r
+       - Fix: LUCENENET-72 "TestIndexReader bugs"\r
+       - Fix: LUCENENET-73 "TestDoc.cs --> access to a closed stream"\r
+       - Fix: LUCENENET-74 "SimpleFSLockFactory can not obtain lock correctly."\r
+       - Fix: LUCENENET-75 "FSDirectory does not correctly handle directory cache "DIRECTORIES""\r
+       - Fix: LUCENENET-76 "DisjunctionMaxQuery has unnecessary clone which causes it to fail unit tests"\r
+       - Fix: LUCENENET-77 "Bug in TestBinaryDocument.cs"\r
+       - Fix: LUCENENET-81 "TestTermVectorsWriter.cs bug"\r
+       - Fix: LUCENENET-82 "NUnit test for TestSimpleExplanations"\r
+       - Fix: LUCENENET-83 "NUnit test for TestComplexExplanations"\r
+       - Fix: LUCENENET-84 "NUnit test for TestMultiFieldQueryParser"\r
+       - Fix: LUCENENET-85 "SupportClass.Parse and System.Globalization.CultureInfo.CurrentCulture.NumberFormat.NumberDecimalSeparator"\r
+       - Fix: LUCENENET-87 "NUnit test for TestQueryParser"\r
+       - Fix: LUCENENET-88 "NUnit test for TestQueryParser -2"\r
+       - Fix: LUCENENET-89 "NUnit test for TestQueryParser -3"\r
+       - Fix: LUCENENET-90 "NUnit test for TestIndexModifier.TestIndex"\r
+       - Fix: LUCENENET-91 "NUnit test for TestQueryParser.TestStarParsing"\r
+       - Fix: LUCENENET-92 "NUnit test for QueryParser.TestMultiAnalyzer."\r
+       - Fix: LUCENENET-93 "NUnit test for Search.TestRemoteSearchable"\r
+       - Fix: LUCENENET-94 "NUnit test for Search.TestSort (RemoteSearchable issues)" (only TestSort.patch applied)\r
+       - Fix: LUCENENET-96 "NUnit test for Lucene.Net.Store.TestLockFactory.TestLockClassProperty"\r
+       - Fix: LUCENENET-101 "Using incorrect base when opening index"\r
+       - Fix: LUCENENET-100 "Problem with remoting of IComparable[] in FieldDoc.cs"\r
+       - Fix: LUCENENET-104 "Name of first index segment is empty string"\r
+\r
+\r
+11Aug07:\r
+       - Release:  Apache Lucene.Net.2.1 build 002 "Beta"\r
+       - Port: Ported the "Test" code from Java to C#\r
+       - Fix: LUCENENET-47: "Make up for Constansts.cs"\r
+       - Fix: LUCENENET-48 "Clone method of SegmentInfos.cs doesn't copy local fields/variables."\r
+       - Fix: LUCENENET-50 "Improvement for FSDirectory."\r
+       - Fix: LUCENENET-52 "IndexFileDeleter in svn trunk"\r
+       - Fix: LUCENENET-53 "SegmentsInfos.GetCurrentSegmentGeneration works incorrectly"\r
+       - Issues: A number of NUnit tests are failing.\r
+\r
+\r
+01May07:\r
+       - Dev Release:  Apache Lucene.Net.2.1 build 001 "early-Alpha"\r
+       - Issues: Optimizing an index will cause an exception.\r
+       - Issues: The "Test" code has not yet been ported for this release.\r
+       - Issues: Code in "contrib" has not been validated to work with this release.\r
+\r
+\r
+30Apr07:\r
+       - Patch:  Apache Lucene.Net.2.0 build 005 "Final"\r
+       - Fix: LUCENENET-37 "Exception while search in Lucene.Net and Index prepared by Lucene Java"\r
+\r
+\r
+11Mar07:\r
+       - Release:  Apache Lucene.Net.2.0 build 004 "Final"\r
+       - Fix: LUCENENET-36 "Countries using "," as decimal separator gets an exception in QueryParser.cs with a query like color~0.5"\r
+       - Fix: LUCENENET-35 "Tokenizer.Close should check if input is null"\r
+       - Fix: LUCENENET-33 "Frequent exceptions at Single Parse(String s)" \r
+       - Fix: LUCENENET-32 "Check hashtable in PhraseQuery.ExtractTerms for existing keys"\r
+       - Fix: LUCENENET-31 "eliminate exception when casting TermFreqVector"\r
+       - Fix: LUCENENET-30 "Unnecessary boxing of bytes"\r
+\r
+\r
+27Dec06:\r
+       - Release:  Apache Lucene.Net.2.0 build 003 "Final"\r
+       - Fix: Lucene.Net.Search.TestSort.TestInternationalMultiSearcherSort -- NUnit test now passes\r
+       - Fix: Lucene.Net.Search.TestSort.TestInternationalSort -- NUnit test now passes\r
+\r
+\r
+27Nov06:\r
+       - Release:  Apache Lucene.Net.2.0 build 002 "Beta"\r
+       - Lucene.Net.Demo.SearchFiles.cs -- ported new code\r
+       - Lucene.Net.Index.SegmentReader.Get() -- changed Exception to SystemException\r
+       - Lucene.Net.Search.StringIndex.cs -- added a Close() method (to fix sort memory leak defect)\r
+       - Lucene.Net.Search.FieldCacheImpl.cs -- added a Close() method (to fix sort memory leak defect)\r
+       - Lucene.Net.Search.FieldSortHitQueue.cs -- added a Close() method (to fix sort memory leak defect)\r
+       - Lucene.Net.Search.IndexSearcher.cs -- added a Close() method (to fix sort memory leak defect)\r
+       - Lucene.Net.Search.MatchAllDocsQuery.Clone() -- removed this unused method\r
+       - Lucene.Net.Search.MultiPhraseQuery.Clone() -- removed this unused method\r
+       - Lucene.Net.Search.PrefixQuery.Clone() -- removed this unused method\r
+       - Lucene.Net.Search.RangeQuery.Clone() -- removed this unused method\r
+       - Lucene.Net.Index.FieldInfos.FieldName()/FieldInfo() -- avoid the use of exception throwing\r
+       - Issues: Lucene.Net.Search.TestSort.TestInternationalMultiSearcherSort -- NUnit test FAILS\r
+       - Issues: Lucene.Net.Search.TestSort.TestInternationalSort -- NUnit test FAILS\r
+\r
+\r
+17Aug06:\r
+       - Release:  Apache Lucene.Net.2.0 build 001 "Alpha"\r
+\r
+\r
+13Jul06:\r
+       - Release:  Apache Lucene.Net.1.9.1 build 001\r
+       - Port: Lucene.Net.Documents.TimeToString() -- re-based with the Java version\r
+       - Port: Lucene.Net.Index.IndexWriter.SetMaxBufferedDocs() -- re-based with the Java version\r
+       - Port: Lucene.Net.Store.BufferedIndexOutput.WriteBytes() -- re-based with the Java version\r
+       - Port: Lucene.Net.Store.RAMOutputStream.FlushBuffer() -- re-based with the Java version\r
+       - Port: Lucene.Net.Demo.* -- re-based with the Java version\r
+       - Port: Test.Lucene.Net.Index.TestCompoundFile.TestLargeWrites() -- new test case added\r
+       - Port: Test.Lucene.Net.StoreTest.Test() -- re-based with the Java version\r
+\r
+\r
+09Jul06:\r
+       - Release:  Apache Lucene.Net.1.9 RC1 build 005 "Final"\r
+       - Fix: Lucene.Net.Search.MultiPhraseQuery.ToString() -- was skipping one too many items during the iteration\r
+       - Fix: Lucene.Net.Index.ParallelReader.GetFieldNames() -- was adding the object instead of the dictionary value\r
+       - Fix: Lucene.Net.Index.ParallelReader.Add() -- was adding the object instead of the dictionary value\r
+       - Fix: Lucene.Net.Store.Obtain() -- changed name from obtain() to Obtain() (lower case to upper case)\r
+       - Fix: Lucene.Net.Index.SegmentReader.~SegmentReader() -- removed: System.Threading.Thread.SetData()\r
+       - Fix: Lucene.Net.Index.TermInfosReader.~TermInfosReader() -- removed: System.Threading.Thread.SetData()\r
+       - Fix: Lucene.Net.Documents.DateField.DATE_LEN -- must use SupportClass.Number.ToString() to get the length\r
+       - Fix: Lucene.Net.Util.ToStringUtils.Boost() -- wasn't adding ".0" when the value doesn't have a remainder\r
+       - Fix: Lucene.Net.Index.SegmentReader.CreateFakeNorms() -- was returning the wrong data member\r
+       - Fix: Lucene.Net.Documents.NumberTools -- value of MIN_STRING_VALUE and MAX_STRING_VALUE were wrong\r
+       - Fix: Test.Lucene.Net.Analysis.TestISOLatin1AccentFilter.TestU() -- file was not saved as unicode; thanks to Ben Tregenna\r
+       - Fix: Test.Lucene.Net.TestSearchForDuplicates.TestRun() -- wasn't using MemoryStream correctly\r
+       - Fix: Test.Lucene.Net.TestSearch.TestSearch_Renamed_Method() -- wasn't using MemoryStream correctly\r
+       - Fix: Test.Lucene.Net.* -- replaced "[TestFixtureSetUp]" with "[SetUp]" and "[FixtureTearDown]" with "[TearDown]"\r
+       - Fix: Test.Lucene.Net.Index.TestParallelReader -- was comparing objects instead of the dictionary value\r
+       - Fix: Test.Lucene.Net.Index.TestSegmentReader -- was comparing objects instead of the dictionary value\r
+       - Fix: Test.Lucene.Net.Index.TestTermVectorWriter -- was not calling InitBloc()\r
+       - Fix: Test.Lucene.Net.Analysis.Setup() -- was adding to the hash without checking if the key already exists\r
+       - Fix: Test.Lucene.Net.Index.TestMultiReader/TestSegmentReader/TestSegmentTermDoc.cs -- all those needed their class member variables re-initialized\r
+\r
+\r
+13Jun06:\r
+       - Release:  Apache Lucene.Net.1.9 RC1 build 004 Beta\r
+       - Fix: Lucene.Net.Search.FieldCacheImpl.GetFloats() -- must remove 'f' or 'F' for System.Single.Parse() to work\r
+       - Fix: Lucene.Net.Index.GetFieldnames() -- was storing the object instead of the value in the object\r
+       - Fix: Test.Lucene.Net.Index.CollectionContains() -- need to compare strings, not objects\r
+       - Fix: Test.Lucene.Net.Search.TestKnownSetOfDocuments() -- don't fail if an item doesn't exist\r
+\r
+\r
+03Jun06:\r
+       - Release: Apache Lucene.Net.1.9 RC1 build 003 Alpha\r
+       - Note: This is the first release of Lucene.Net 1.9 to SVN\r
+       - Note: Added ZIP compression support via reflection.  Thanks to Eyal Post\r
+       - Note: Fixed bugs in the code which were exposed via the NUnit "Test" code\r
+       - Note: NUnit "Test" code has been ported to Lucene.Net.  Out of 307 tests, 58 are failing\r
+       - Note: There is still some remaining port work to be done; look for the text "Aroush" in the code\r
+       - Issue: There is still some code not fully ported; search for "Aroush" to find it\r
+       - Issue: The NUnit test code has not been ported yet\r
+       - Issue: Demo.IndexHtml won't work due to some bug in the area of in-memory stream porting\r
+\r
+\r
+07Feb06:\r
+       - Release: Lucene.Net.1.9 RC1 build 002 Alpha\r
+       - Note: This release is based on the current Java code release of 1.9 RC1\r
+       - Note: This release contains all the fixes currently implemented for 1.4.3 build 004\r
+       - Note: There is still some remaining port work to be done; look for the text "Aroush" in the code\r
+       - Issue: The NUnit test code has not been ported yet\r
+       - Issue: Demo.IndexHtml won't work due to some bug in the area of in-memory stream porting\r
+\r
+\r
+15Dec05:\r
+       - Release: Lucene.Net.1.4.3 final build 004 to the Apache incubator site\r
+       - Clean-up: VS.Net project settings.\r
+\r
+\r
+21Nov05:\r
+       - Release: Lucene.Net.1.4.3 final build 004\r
+       - Fix: Fixed System.Object cast in Lucene.Net.Search.RangeQuery and Lucene.Net.Analysis.PorterStemFilter -- Thanks to Jason\r
+       - Note: Moved project to Apache at: http://svn.apache.org/repos/asf/incubator/lucene.net/\r
+\r
+\r
+23Oct05:\r
+       - Release: Lucene.Net.1.4.3 final build 003\r
+       - Fix: Memory leak -- Thanks to Marcus.\r
+       - Fix: Remoting class and other misuse of System.Object casts -- Thanks to Jason\r
+\r
+\r
+26May05:\r
+       - Release: Lucene.Net.1.9 RC1 build 001 Alpha\r
+       - Issue: There is still some code not fully ported; search for "Aroush" to find it.\r
+       - Issue: The NUnit test code has not been ported yet (the current code is still based on 1.4.3 final)\r
+\r
+\r
+22Feb05:\r
+       - Release: Lucene.Net.1.4.3 final build 002\r
+       - Fix: Lucene.Net.Index.MultiReader.cs -- fixed GetIndexedFieldNames()\r
+\r
+\r
+13Feb05:\r
+       - Release: Lucene.Net.1.4.3 final build 001\r
+       - Fix: Lucene.Net.Search.TermQuery.Clone() -- was returning null, so removed it.\r
+       - Fix: Lucene.Net.Documents.Field.cs -- replaced StreamReader() with TextReader()\r
+       - Fix: Lucene.Net.Search.RemoteSearchable.cs -- added InitializeLifetimeService()\r
+       - Fix: Lucene.Net.Document.DateField.cs -- fixed StringToDate()\r
+       - Fix: Lucene.Net.Store.RAMDirectory.cs -- fixed RAMDirectory()\r
+       - Issue: Demo.IndexHtml won't work due to some bug in the area of in-memory stream porting\r
+\r
+\r
+05Jan05:\r
+       - Release: Lucene.Net.1.4.3 RC2 build 001\r
+       - Fix: Lucene.Net.Search.Query.MergeBooleanQueries(); cast type was wrong.\r
+       - Fix: Demo.IndexHtml; can index now but searching on the index file won't work, yet.\r
+\r
+\r
+21Dec04:\r
+       - Release: Lucene.Net.1.4.3 RC1 build 001\r
+       - Fix: Document.DateField.StringToTime() -- can't use System.Convert.ToInt64()\r
+\r
+\r
+06Dec04:\r
+       - Release: Lucene.Net.1.4.3 beta build 001\r
+\r
+\r
+22Nov04:\r
+       - Release: Lucene.Net.1.4.0 RC1 build 001\r
+       - Fix: Changed some data types from 'int' to 'long'\r
+       - Fix: changed some code from base.Clone() to this.Clone()\r
+\r
+\r
+10Nov04:\r
+       - Release: Lucene.Net.1.4.0 beta build 002 (targeted for "final")\r
+       - Fix: Document.Fields() now returns an IEnumerable.  Thanks to Marcus.\r
+       - Fix: Use SupportClass.Single.Parse() to parse numbers ending with 'f'\r
+\r
+\r
+03Nov04:\r
+       - Release: Lucene.Net.1.4.0 beta build 001\r
+       - Release Note: 159 tests run, 7 failed due to non-existing remote-Lucene setup\r
+       - Failed Test:\r
+       - Search.TestRemoteSearchable.*\r
+       - Search.TestSort.TestNormalizedScores/TestRemoteCustomSort/TestRemoteSort\r
+       - Fix: Removed 'ref' keyword usage from DateTime.\r
+       - Port: TestQueryParser.cs -- done. thanks to Radu.\r
+       - Fix: TestQueryParser.cs -- date-range formatting issues.\r
+       - Fix: Lucene.Net.QueryParsers.GetRangeQuery() -- date-range formatting issues.\r
+       - Fix: Use: System.IO.Path.Combine("path-1", "path-2") instead of "path-1" + "\\" + "path-2"\r
+\r
+\r
+29Oct04:\r
+       - Port: Ported "test"; out of ~160 tests, only ~14 fail.\r
+       - Port: SupportClass.Character.ForDigit() -- done. thanks to Monsur\r
+       - Port: Lucene.Net.Documents.DateField.cs -- done. thanks to Monsur\r
+       - Fix: Lucene.Net.Search.BooleanScorer\r
+       - Fix: Lucene.Net.Search.FilteredQuery\r
+       - Fix: Lucene.Net.Search.SortScorers\r
+\r
+\r
+25Oct04:\r
+       - Port: Ported "test"; more than 2/3 of the NUnit tests now pass\r
+       - Fix: Fixed query (ToString()) formatting issue ("2" -> "2.0") to pass NUnit test\r
+       - Fix: Field items iteration\r
+\r
+\r
+19Oct04:\r
+       - Fix: TermInfoReader.get_Renamed() -> TermInfoReader.Get()\r
+       - Fix: Searching now works\r
+\r
+\r
+18Oct04:\r
+       - Fix: Indexing now works; indexing with Lucene.Net and searching on the same index via Jakarta Lucene\r
+       - Fix: System.IO.TextReader.Read() returns 0 not -1 so check for '<= 0'\r
+\r
+\r
+16Oct04:\r
+       - Port: QueryTermVector.cs -- done. thanks to Monsur\r
+\r
+\r
+14Oct04:\r
+       - Port: SpanNearQuery.cs -- done. thanks to Radu\r
+       - Port: SpanOrQuery.cs -- done. thanks to Radu\r
+       - Port: FSDirectory.cs -- done. thanks to Seth & Bill\r
+       - Port: IndexReader.cs -- done. thanks to Bill\r
+       - Port: IndexWriter.cs -- done. thanks to Bill\r
+\r
+\r
+13Oct04 Lucene.Net.1.4-alpha build 002\r
+       - Port: SpanNearQuery.cs & SpanOrQuery.cs -- thanks to Radu\r
+       - Fix: FSDirectory.cs, IndexReader.cs & IndexWriter.cs -- thanks to Seth and Bill\r
+       - Fix: FSDirectory.RenameFile()\r
+\r
+\r
+13Oct04 Lucene.Net.1.4-alpha build 002\r
+       - Note: First effort to port Jakarta Lucene 1.4-final to C#\r
+       - Note: Ported elements are: "src" and "demo"\r
+       - Note: There is still outstanding un-ported code (12 in all) which is being looked at\r
+       - Note: The code compiles and runs as is, but you can't index or search\r
+       - Fix: PriorityQueue.Out() -> PriorityQueue.Put()\r
+       - Port: MultiReader.cs & Document.cs -- thanks to Radu\r
+\r
+\r
+15Sep04 Lucene.Net.1.3-rc3 build 001\r
+       - Revival of Lucene by repackaging 1.3rc1-001 and 1.3rc3-001\r
+\r
+\r
+29Sep04 Lucene.Net 1.3-rc3 build 001\r
+       - initial release of jakarta lucene 1.3-rc3\r
+               for changes: http://cvs.apache.org/viewcvs.cgi/*checkout*/jakarta-lucene/CHANGES.txt?rev=1.58 \r
+       - fix: DateToString bug\r
+               for more info: https://sourceforge.net/tracker/?func=detail&atid=582307&aid=910832&group_id=87200 \r
+\r
+\r
+18Apr04        Lucene.Net 1.3-rc2 build 002\r
+       - add: demos, web demos and tests\r
+       - fix: add FileAccess.Read key with FileMode.Open when opening\r
+              a file stream for read\r
+       - fix: name style updated \r
+                       Document.cs:GetField()\r
+                       QueryParser.cs: GenerateParseException()\r
+                       Search/BooleanQuery.cs:GetMaxClauseCount(),SetMaxClauseCount()\r
+       - new: Lucene.Net National Language Support Pack (Lucene.Net.NLS Pack)\r
+               support: Brazilian, CJK, Chinese, Czech, French and Dutch Analysers\r
+\r
+\r
+20Dec03 Lucene.Net 1.3-rc2 build 001\r
+       - initial release for 1.3-rc2\r
+         Lucene.Net only, without demos and tests\r
+\r
+\r
+09Jan03 Lucene.Net 1.3-rc1 build 002\r
+       - update: documentation and comments\r
+       - bug fixed: wildcard search crashed Lucene\r
+                 see the TestQueryFSDirectory project for details\r
+       - bug fixed: all tests and demos work \r
+\r
+\r
+08May03 Lucene.Net 1.3-rc1 build 001\r
+       - initial release\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net.dll.sources b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net.dll.sources
new file mode 100644 (file)
index 0000000..be8543f
--- /dev/null
@@ -0,0 +1,419 @@
+Lucene.Net/Analysis/Analyzer.cs
+Lucene.Net/Analysis/ASCIIFoldingFilter.cs
+Lucene.Net/Analysis/BaseCharFilter.cs
+Lucene.Net/Analysis/CachingTokenFilter.cs
+Lucene.Net/Analysis/CharacterCache.cs
+Lucene.Net/Analysis/CharArraySet.cs
+Lucene.Net/Analysis/CharFilter.cs
+Lucene.Net/Analysis/CharReader.cs
+Lucene.Net/Analysis/CharStream.cs
+Lucene.Net/Analysis/CharTokenizer.cs
+Lucene.Net/Analysis/ISOLatin1AccentFilter.cs
+Lucene.Net/Analysis/KeywordAnalyzer.cs
+Lucene.Net/Analysis/KeywordTokenizer.cs
+Lucene.Net/Analysis/LengthFilter.cs
+Lucene.Net/Analysis/LetterTokenizer.cs
+Lucene.Net/Analysis/LowerCaseFilter.cs
+Lucene.Net/Analysis/LowerCaseTokenizer.cs
+Lucene.Net/Analysis/MappingCharFilter.cs
+Lucene.Net/Analysis/NormalizeCharMap.cs
+Lucene.Net/Analysis/NumericTokenStream.cs
+Lucene.Net/Analysis/PerFieldAnalyzerWrapper.cs
+Lucene.Net/Analysis/PorterStemFilter.cs
+Lucene.Net/Analysis/PorterStemmer.cs
+Lucene.Net/Analysis/SimpleAnalyzer.cs
+Lucene.Net/Analysis/SinkTokenizer.cs
+Lucene.Net/Analysis/Standard/StandardAnalyzer.cs
+Lucene.Net/Analysis/Standard/StandardFilter.cs
+Lucene.Net/Analysis/Standard/StandardTokenizer.cs
+Lucene.Net/Analysis/Standard/StandardTokenizerImpl.cs
+Lucene.Net/Analysis/StopAnalyzer.cs
+Lucene.Net/Analysis/StopFilter.cs
+Lucene.Net/Analysis/TeeSinkTokenFilter.cs
+Lucene.Net/Analysis/TeeTokenFilter.cs
+Lucene.Net/Analysis/Token.cs
+Lucene.Net/Analysis/Tokenattributes/FlagsAttribute.cs
+Lucene.Net/Analysis/Tokenattributes/FlagsAttributeImpl.cs
+Lucene.Net/Analysis/Tokenattributes/OffsetAttribute.cs
+Lucene.Net/Analysis/Tokenattributes/OffsetAttributeImpl.cs
+Lucene.Net/Analysis/Tokenattributes/PayloadAttribute.cs
+Lucene.Net/Analysis/Tokenattributes/PayloadAttributeImpl.cs
+Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs
+Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs
+Lucene.Net/Analysis/Tokenattributes/TermAttribute.cs
+Lucene.Net/Analysis/Tokenattributes/TermAttributeImpl.cs
+Lucene.Net/Analysis/Tokenattributes/TypeAttribute.cs
+Lucene.Net/Analysis/Tokenattributes/TypeAttributeImpl.cs
+Lucene.Net/Analysis/TokenFilter.cs
+Lucene.Net/Analysis/Tokenizer.cs
+Lucene.Net/Analysis/TokenStream.cs
+Lucene.Net/Analysis/TokenWrapper.cs
+Lucene.Net/Analysis/WhitespaceAnalyzer.cs
+Lucene.Net/Analysis/WhitespaceTokenizer.cs
+Lucene.Net/Analysis/WordlistLoader.cs
+Lucene.Net/AssemblyInfo.cs
+Lucene.Net/Document/AbstractField.cs
+Lucene.Net/Document/CompressionTools.cs
+Lucene.Net/Document/DateField.cs
+Lucene.Net/Document/DateTools.cs
+Lucene.Net/Document/Document.cs
+Lucene.Net/Document/Field.cs
+Lucene.Net/Document/Fieldable.cs
+Lucene.Net/Document/FieldSelector.cs
+Lucene.Net/Document/FieldSelectorResult.cs
+Lucene.Net/Document/LoadFirstFieldSelector.cs
+Lucene.Net/Document/MapFieldSelector.cs
+Lucene.Net/Document/NumberTools.cs
+Lucene.Net/Document/NumericField.cs
+Lucene.Net/Document/SetBasedFieldSelector.cs
+Lucene.Net/Index/AbstractAllTermDocs.cs
+Lucene.Net/Index/AllTermDocs.cs
+Lucene.Net/Index/BufferedDeletes.cs
+Lucene.Net/Index/ByteBlockPool.cs
+Lucene.Net/Index/ByteSliceReader.cs
+Lucene.Net/Index/ByteSliceWriter.cs
+Lucene.Net/Index/CharBlockPool.cs
+Lucene.Net/Index/CheckIndex.cs
+Lucene.Net/Index/CompoundFileReader.cs
+Lucene.Net/Index/CompoundFileWriter.cs
+Lucene.Net/Index/ConcurrentMergeScheduler.cs
+Lucene.Net/Index/CorruptIndexException.cs
+Lucene.Net/Index/DefaultSkipListReader.cs
+Lucene.Net/Index/DefaultSkipListWriter.cs
+Lucene.Net/Index/DirectoryOwningReader.cs
+Lucene.Net/Index/DirectoryReader.cs
+Lucene.Net/Index/DocConsumer.cs
+Lucene.Net/Index/DocConsumerPerThread.cs
+Lucene.Net/Index/DocFieldConsumer.cs
+Lucene.Net/Index/DocFieldConsumerPerField.cs
+Lucene.Net/Index/DocFieldConsumerPerThread.cs
+Lucene.Net/Index/DocFieldConsumers.cs
+Lucene.Net/Index/DocFieldConsumersPerField.cs
+Lucene.Net/Index/DocFieldConsumersPerThread.cs
+Lucene.Net/Index/DocFieldProcessor.cs
+Lucene.Net/Index/DocFieldProcessorPerField.cs
+Lucene.Net/Index/DocFieldProcessorPerThread.cs
+Lucene.Net/Index/DocInverter.cs
+Lucene.Net/Index/DocInverterPerField.cs
+Lucene.Net/Index/DocInverterPerThread.cs
+Lucene.Net/Index/DocumentsWriter.cs
+Lucene.Net/Index/DocumentsWriterThreadState.cs
+Lucene.Net/Index/FieldInfo.cs
+Lucene.Net/Index/FieldInfos.cs
+Lucene.Net/Index/FieldInvertState.cs
+Lucene.Net/Index/FieldReaderException.cs
+Lucene.Net/Index/FieldSortedTermVectorMapper.cs
+Lucene.Net/Index/FieldsReader.cs
+Lucene.Net/Index/FieldsWriter.cs
+Lucene.Net/Index/FilterIndexReader.cs
+Lucene.Net/Index/FormatPostingsDocsConsumer.cs
+Lucene.Net/Index/FormatPostingsDocsWriter.cs
+Lucene.Net/Index/FormatPostingsFieldsConsumer.cs
+Lucene.Net/Index/FormatPostingsFieldsWriter.cs
+Lucene.Net/Index/FormatPostingsPositionsConsumer.cs
+Lucene.Net/Index/FormatPostingsPositionsWriter.cs
+Lucene.Net/Index/FormatPostingsTermsConsumer.cs
+Lucene.Net/Index/FormatPostingsTermsWriter.cs
+Lucene.Net/Index/FreqProxFieldMergeState.cs
+Lucene.Net/Index/FreqProxTermsWriter.cs
+Lucene.Net/Index/FreqProxTermsWriterPerField.cs
+Lucene.Net/Index/FreqProxTermsWriterPerThread.cs
+Lucene.Net/Index/IndexCommit.cs
+Lucene.Net/Index/IndexCommitPoint.cs
+Lucene.Net/Index/IndexDeletionPolicy.cs
+Lucene.Net/Index/IndexFileDeleter.cs
+Lucene.Net/Index/IndexFileNameFilter.cs
+Lucene.Net/Index/IndexFileNames.cs
+Lucene.Net/Index/IndexModifier.cs
+Lucene.Net/Index/IndexReader.cs
+Lucene.Net/Index/IndexWriter.cs
+Lucene.Net/Index/IntBlockPool.cs
+Lucene.Net/Index/InvertedDocConsumer.cs
+Lucene.Net/Index/InvertedDocConsumerPerField.cs
+Lucene.Net/Index/InvertedDocConsumerPerThread.cs
+Lucene.Net/Index/InvertedDocEndConsumer.cs
+Lucene.Net/Index/InvertedDocEndConsumerPerField.cs
+Lucene.Net/Index/InvertedDocEndConsumerPerThread.cs
+Lucene.Net/Index/KeepOnlyLastCommitDeletionPolicy.cs
+Lucene.Net/Index/LogByteSizeMergePolicy.cs
+Lucene.Net/Index/LogDocMergePolicy.cs
+Lucene.Net/Index/LogMergePolicy.cs
+Lucene.Net/Index/MergeDocIDRemapper.cs
+Lucene.Net/Index/MergePolicy.cs
+Lucene.Net/Index/MergeScheduler.cs
+Lucene.Net/Index/MultiLevelSkipListReader.cs
+Lucene.Net/Index/MultiLevelSkipListWriter.cs
+Lucene.Net/Index/MultipleTermPositions.cs
+Lucene.Net/Index/MultiReader.cs
+Lucene.Net/Index/NormsWriter.cs
+Lucene.Net/Index/NormsWriterPerField.cs
+Lucene.Net/Index/NormsWriterPerThread.cs
+Lucene.Net/Index/ParallelReader.cs
+Lucene.Net/Index/Payload.cs
+Lucene.Net/Index/PositionBasedTermVectorMapper.cs
+Lucene.Net/Index/RawPostingList.cs
+Lucene.Net/Index/ReadOnlyDirectoryReader.cs
+Lucene.Net/Index/ReadOnlySegmentReader.cs
+Lucene.Net/Index/ReusableStringReader.cs
+Lucene.Net/Index/SegmentInfo.cs
+Lucene.Net/Index/SegmentInfos.cs
+Lucene.Net/Index/SegmentMergeInfo.cs
+Lucene.Net/Index/SegmentMergeQueue.cs
+Lucene.Net/Index/SegmentMerger.cs
+Lucene.Net/Index/SegmentReader.cs
+Lucene.Net/Index/SegmentTermDocs.cs
+Lucene.Net/Index/SegmentTermEnum.cs
+Lucene.Net/Index/SegmentTermPositions.cs
+Lucene.Net/Index/SegmentTermPositionVector.cs
+Lucene.Net/Index/SegmentTermVector.cs
+Lucene.Net/Index/SegmentWriteState.cs
+Lucene.Net/Index/SerialMergeScheduler.cs
+Lucene.Net/Index/SnapshotDeletionPolicy.cs
+Lucene.Net/Index/SortedTermVectorMapper.cs
+Lucene.Net/Index/StaleReaderException.cs
+Lucene.Net/Index/StoredFieldsWriter.cs
+Lucene.Net/Index/StoredFieldsWriterPerThread.cs
+Lucene.Net/Index/Term.cs
+Lucene.Net/Index/TermBuffer.cs
+Lucene.Net/Index/TermDocs.cs
+Lucene.Net/Index/TermEnum.cs
+Lucene.Net/Index/TermFreqVector.cs
+Lucene.Net/Index/TermInfo.cs
+Lucene.Net/Index/TermInfosReader.cs
+Lucene.Net/Index/TermInfosWriter.cs
+Lucene.Net/Index/TermPositions.cs
+Lucene.Net/Index/TermPositionVector.cs
+Lucene.Net/Index/TermsHash.cs
+Lucene.Net/Index/TermsHashConsumer.cs
+Lucene.Net/Index/TermsHashConsumerPerField.cs
+Lucene.Net/Index/TermsHashConsumerPerThread.cs
+Lucene.Net/Index/TermsHashPerField.cs
+Lucene.Net/Index/TermsHashPerThread.cs
+Lucene.Net/Index/TermVectorEntry.cs
+Lucene.Net/Index/TermVectorEntryFreqSortedComparator.cs
+Lucene.Net/Index/TermVectorMapper.cs
+Lucene.Net/Index/TermVectorOffsetInfo.cs
+Lucene.Net/Index/TermVectorsReader.cs
+Lucene.Net/Index/TermVectorsTermsWriter.cs
+Lucene.Net/Index/TermVectorsTermsWriterPerField.cs
+Lucene.Net/Index/TermVectorsTermsWriterPerThread.cs
+Lucene.Net/Index/TermVectorsWriter.cs
+Lucene.Net/LucenePackage.cs
+Lucene.Net/LZOCompressor.cs
+Lucene.Net/Messages/Message.cs
+Lucene.Net/Messages/MessageImpl.cs
+Lucene.Net/Messages/NLS.cs
+Lucene.Net/Messages/NLSException.cs
+Lucene.Net/QueryParser/CharStream.cs
+Lucene.Net/QueryParser/FastCharStream.cs
+Lucene.Net/QueryParser/MultiFieldQueryParser.cs
+Lucene.Net/QueryParser/ParseException.cs
+Lucene.Net/QueryParser/QueryParser.cs
+Lucene.Net/QueryParser/QueryParserConstants.cs
+Lucene.Net/QueryParser/QueryParserTokenManager.cs
+Lucene.Net/QueryParser/Token.cs
+Lucene.Net/QueryParser/TokenMgrError.cs
+Lucene.Net/Search/BooleanClause.cs
+Lucene.Net/Search/BooleanQuery.cs
+Lucene.Net/Search/BooleanScorer.cs
+Lucene.Net/Search/BooleanScorer2.cs
+Lucene.Net/Search/CachingSpanFilter.cs
+Lucene.Net/Search/CachingWrapperFilter.cs
+Lucene.Net/Search/Collector.cs
+Lucene.Net/Search/ComplexExplanation.cs
+Lucene.Net/Search/ConjunctionScorer.cs
+Lucene.Net/Search/ConstantScoreQuery.cs
+Lucene.Net/Search/ConstantScoreRangeQuery.cs
+Lucene.Net/Search/DefaultSimilarity.cs
+Lucene.Net/Search/DisjunctionMaxQuery.cs
+Lucene.Net/Search/DisjunctionMaxScorer.cs
+Lucene.Net/Search/DisjunctionSumScorer.cs
+Lucene.Net/Search/DocIdSet.cs
+Lucene.Net/Search/DocIdSetIterator.cs
+Lucene.Net/Search/ExactPhraseScorer.cs
+Lucene.Net/Search/Explanation.cs
+Lucene.Net/Search/ExtendedFieldCache.cs
+Lucene.Net/Search/FieldCache.cs
+Lucene.Net/Search/FieldCacheImpl.cs
+Lucene.Net/Search/FieldCacheRangeFilter.cs
+Lucene.Net/Search/FieldCacheTermsFilter.cs
+Lucene.Net/Search/FieldComparator.cs
+Lucene.Net/Search/FieldComparatorSource.cs
+Lucene.Net/Search/FieldDoc.cs
+Lucene.Net/Search/FieldDocSortedHitQueue.cs
+Lucene.Net/Search/FieldSortedHitQueue.cs
+Lucene.Net/Search/FieldValueHitQueue.cs
+Lucene.Net/Search/Filter.cs
+Lucene.Net/Search/FilteredDocIdSet.cs
+Lucene.Net/Search/FilteredDocIdSetIterator.cs
+Lucene.Net/Search/FilteredQuery.cs
+Lucene.Net/Search/FilteredTermEnum.cs
+Lucene.Net/Search/FilterManager.cs
+Lucene.Net/Search/Function/ByteFieldSource.cs
+Lucene.Net/Search/Function/CustomScoreProvider.cs
+Lucene.Net/Search/Function/CustomScoreQuery.cs
+Lucene.Net/Search/Function/DocValues.cs
+Lucene.Net/Search/Function/FieldCacheSource.cs
+Lucene.Net/Search/Function/FieldScoreQuery.cs
+Lucene.Net/Search/Function/FloatFieldSource.cs
+Lucene.Net/Search/Function/IntFieldSource.cs
+Lucene.Net/Search/Function/MultiValueSource.cs
+Lucene.Net/Search/Function/OrdFieldSource.cs
+Lucene.Net/Search/Function/ReverseOrdFieldSource.cs
+Lucene.Net/Search/Function/ShortFieldSource.cs
+Lucene.Net/Search/Function/ValueSource.cs
+Lucene.Net/Search/Function/ValueSourceQuery.cs
+Lucene.Net/Search/FuzzyQuery.cs
+Lucene.Net/Search/FuzzyTermEnum.cs
+Lucene.Net/Search/Hit.cs
+Lucene.Net/Search/HitCollector.cs
+Lucene.Net/Search/HitCollectorWrapper.cs
+Lucene.Net/Search/HitIterator.cs
+Lucene.Net/Search/HitQueue.cs
+Lucene.Net/Search/Hits.cs
+Lucene.Net/Search/IndexSearcher.cs
+Lucene.Net/Search/MatchAllDocsQuery.cs
+Lucene.Net/Search/MultiPhraseQuery.cs
+Lucene.Net/Search/MultiSearcher.cs
+Lucene.Net/Search/MultiTermQuery.cs
+Lucene.Net/Search/MultiTermQueryWrapperFilter.cs
+Lucene.Net/Search/NumericRangeFilter.cs
+Lucene.Net/Search/NumericRangeQuery.cs
+Lucene.Net/Search/ParallelMultiSearcher.cs
+Lucene.Net/Search/Payloads/AveragePayloadFunction.cs
+Lucene.Net/Search/Payloads/BoostingTermQuery.cs
+Lucene.Net/Search/Payloads/MaxPayloadFunction.cs
+Lucene.Net/Search/Payloads/MinPayloadFunction.cs
+Lucene.Net/Search/Payloads/PayloadFunction.cs
+Lucene.Net/Search/Payloads/PayloadNearQuery.cs
+Lucene.Net/Search/Payloads/PayloadSpanUtil.cs
+Lucene.Net/Search/Payloads/PayloadTermQuery.cs
+Lucene.Net/Search/PhrasePositions.cs
+Lucene.Net/Search/PhraseQuery.cs
+Lucene.Net/Search/PhraseQueue.cs
+Lucene.Net/Search/PhraseScorer.cs
+Lucene.Net/Search/PositiveScoresOnlyCollector.cs
+Lucene.Net/Search/PrefixFilter.cs
+Lucene.Net/Search/PrefixQuery.cs
+Lucene.Net/Search/PrefixTermEnum.cs
+Lucene.Net/Search/Query.cs
+Lucene.Net/Search/QueryFilter.cs
+Lucene.Net/Search/QueryTermVector.cs
+Lucene.Net/Search/QueryWrapperFilter.cs
+Lucene.Net/Search/RangeFilter.cs
+Lucene.Net/Search/RangeQuery.cs
+Lucene.Net/Search/ReqExclScorer.cs
+Lucene.Net/Search/ReqOptSumScorer.cs
+Lucene.Net/Search/ScoreCachingWrappingScorer.cs
+Lucene.Net/Search/ScoreDoc.cs
+Lucene.Net/Search/ScoreDocComparator.cs
+Lucene.Net/Search/Scorer.cs
+Lucene.Net/Search/Searchable.cs
+Lucene.Net/Search/Searcher.cs
+Lucene.Net/Search/Similarity.cs
+Lucene.Net/Search/SimilarityDelegator.cs
+Lucene.Net/Search/SloppyPhraseScorer.cs
+Lucene.Net/Search/Sort.cs
+Lucene.Net/Search/SortComparator.cs
+Lucene.Net/Search/SortComparatorSource.cs
+Lucene.Net/Search/SortField.cs
+Lucene.Net/Search/SpanFilter.cs
+Lucene.Net/Search/SpanFilterResult.cs
+Lucene.Net/Search/SpanQueryFilter.cs
+Lucene.Net/Search/Spans/FieldMaskingSpanQuery.cs
+Lucene.Net/Search/Spans/NearSpansOrdered.cs
+Lucene.Net/Search/Spans/NearSpansUnordered.cs
+Lucene.Net/Search/Spans/SpanFirstQuery.cs
+Lucene.Net/Search/Spans/SpanNearQuery.cs
+Lucene.Net/Search/Spans/SpanNotQuery.cs
+Lucene.Net/Search/Spans/SpanOrQuery.cs
+Lucene.Net/Search/Spans/SpanQuery.cs
+Lucene.Net/Search/Spans/Spans.cs
+Lucene.Net/Search/Spans/SpanScorer.cs
+Lucene.Net/Search/Spans/SpanTermQuery.cs
+Lucene.Net/Search/Spans/SpanWeight.cs
+Lucene.Net/Search/Spans/TermSpans.cs
+Lucene.Net/Search/TermQuery.cs
+Lucene.Net/Search/TermRangeFilter.cs
+Lucene.Net/Search/TermRangeQuery.cs
+Lucene.Net/Search/TermRangeTermEnum.cs
+Lucene.Net/Search/TermScorer.cs
+Lucene.Net/Search/TimeLimitedCollector.cs
+Lucene.Net/Search/TimeLimitingCollector.cs
+Lucene.Net/Search/TopDocCollector.cs
+Lucene.Net/Search/TopDocs.cs
+Lucene.Net/Search/TopDocsCollector.cs
+Lucene.Net/Search/TopFieldCollector.cs
+Lucene.Net/Search/TopFieldDocCollector.cs
+Lucene.Net/Search/TopFieldDocs.cs
+Lucene.Net/Search/TopScoreDocCollector.cs
+Lucene.Net/Search/Weight.cs
+Lucene.Net/Search/WildcardQuery.cs
+Lucene.Net/Search/WildcardTermEnum.cs
+Lucene.Net/Store/AlreadyClosedException.cs
+Lucene.Net/Store/BufferedIndexInput.cs
+Lucene.Net/Store/BufferedIndexOutput.cs
+Lucene.Net/Store/CheckSumIndexInput.cs
+Lucene.Net/Store/CheckSumIndexOutput.cs
+Lucene.Net/Store/Directory.cs
+Lucene.Net/Store/FileSwitchDirectory.cs
+Lucene.Net/Store/FSDirectory.cs
+Lucene.Net/Store/FSLockFactory.cs
+Lucene.Net/Store/IndexInput.cs
+Lucene.Net/Store/IndexOutput.cs
+Lucene.Net/Store/Lock.cs
+Lucene.Net/Store/LockFactory.cs
+Lucene.Net/Store/LockObtainFailedException.cs
+Lucene.Net/Store/LockReleaseFailedException.cs
+Lucene.Net/Store/LockStressTest.cs
+Lucene.Net/Store/LockVerifyServer.cs
+Lucene.Net/Store/MMapDirectory.cs
+Lucene.Net/Store/NativeFSLockFactory.cs
+Lucene.Net/Store/NIOFSDirectory.cs
+Lucene.Net/Store/NoLockFactory.cs
+Lucene.Net/Store/NoSuchDirectoryException.cs
+Lucene.Net/Store/RAMDirectory.cs
+Lucene.Net/Store/RAMFile.cs
+Lucene.Net/Store/RAMInputStream.cs
+Lucene.Net/Store/RAMOutputStream.cs
+Lucene.Net/Store/SimpleFSDirectory.cs
+Lucene.Net/Store/SimpleFSLockFactory.cs
+Lucene.Net/Store/SingleInstanceLockFactory.cs
+Lucene.Net/Store/VerifyingLockFactory.cs
+Lucene.Net/SupportClass.cs
+Lucene.Net/Util/ArrayUtil.cs
+Lucene.Net/Util/Attribute.cs
+Lucene.Net/Util/AttributeImpl.cs
+Lucene.Net/Util/AttributeSource.cs
+Lucene.Net/Util/AverageGuessMemoryModel.cs
+Lucene.Net/Util/BitUtil.cs
+Lucene.Net/Util/BitVector.cs
+Lucene.Net/Util/Cache/Cache.cs
+Lucene.Net/Util/Cache/SimpleLRUCache.cs
+Lucene.Net/Util/Cache/SimpleMapCache.cs
+Lucene.Net/Util/CloseableThreadLocal-old.cs
+Lucene.Net/Util/CloseableThreadLocal.cs
+Lucene.Net/Util/Constants.cs
+Lucene.Net/Util/DocIdBitSet.cs
+Lucene.Net/Util/FieldCacheSanityChecker.cs
+Lucene.Net/Util/IndexableBinaryStringTools.cs
+Lucene.Net/Util/MapOfSets.cs
+Lucene.Net/Util/MemoryModel.cs
+Lucene.Net/Util/NumericUtils.cs
+Lucene.Net/Util/OpenBitSet.cs
+Lucene.Net/Util/OpenBitSetDISI.cs
+Lucene.Net/Util/OpenBitSetIterator.cs
+Lucene.Net/Util/Parameter.cs
+Lucene.Net/Util/PriorityQueue.cs
+Lucene.Net/Util/RamUsageEstimator.cs
+Lucene.Net/Util/ReaderUtil.cs
+Lucene.Net/Util/ScorerDocQueue.cs
+Lucene.Net/Util/SimpleStringInterner.cs
+Lucene.Net/Util/SmallFloat.cs
+Lucene.Net/Util/SortedVIntList.cs
+Lucene.Net/Util/SorterTemplate.cs
+Lucene.Net/Util/StringHelper.cs
+Lucene.Net/Util/StringInterner.cs
+Lucene.Net/Util/ToStringUtils.cs
+Lucene.Net/Util/UnicodeUtil.cs
+Lucene.Net/Util/Version.cs
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/.gitattributes b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/.gitattributes
new file mode 100644 (file)
index 0000000..3d812f0
--- /dev/null
@@ -0,0 +1,4 @@
+/AssemblyInfo.cs -crlf
+/Lucene.Net.xml -crlf
+/Overview.html -crlf
+/SupportClass.cs -crlf
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/.gitattributes b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/.gitattributes
new file mode 100644 (file)
index 0000000..460ad06
--- /dev/null
@@ -0,0 +1,37 @@
+/ASCIIFoldingFilter.cs -crlf
+/Analyzer.cs -crlf
+/BaseCharFilter.cs -crlf
+/CachingTokenFilter.cs -crlf
+/CharArraySet.cs -crlf
+/CharFilter.cs -crlf
+/CharReader.cs -crlf
+/CharStream.cs -crlf
+/CharTokenizer.cs -crlf
+/CharacterCache.cs -crlf
+/ISOLatin1AccentFilter.cs -crlf
+/KeywordAnalyzer.cs -crlf
+/KeywordTokenizer.cs -crlf
+/LengthFilter.cs -crlf
+/LetterTokenizer.cs -crlf
+/LowerCaseFilter.cs -crlf
+/LowerCaseTokenizer.cs -crlf
+/MappingCharFilter.cs -crlf
+/NormalizeCharMap.cs -crlf
+/NumericTokenStream.cs -crlf
+/PerFieldAnalyzerWrapper.cs -crlf
+/PorterStemFilter.cs -crlf
+/PorterStemmer.cs -crlf
+/SimpleAnalyzer.cs -crlf
+/SinkTokenizer.cs -crlf
+/StopAnalyzer.cs -crlf
+/StopFilter.cs -crlf
+/TeeSinkTokenFilter.cs -crlf
+/TeeTokenFilter.cs -crlf
+/Token.cs -crlf
+/TokenFilter.cs -crlf
+/TokenStream.cs -crlf
+/TokenWrapper.cs -crlf
+/Tokenizer.cs -crlf
+/WhitespaceAnalyzer.cs -crlf
+/WhitespaceTokenizer.cs -crlf
+/WordlistLoader.cs -crlf
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/ASCIIFoldingFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/ASCIIFoldingFilter.cs
new file mode 100644 (file)
index 0000000..eef7975
--- /dev/null
@@ -0,0 +1,3287 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+using ArrayUtil = Mono.Lucene.Net.Util.ArrayUtil;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> This class converts alphabetic, numeric, and symbolic Unicode characters
+       /// which are not in the first 127 ASCII characters (the "Basic Latin" Unicode
+       /// block) into their ASCII equivalents, if one exists.
+       /// 
+       /// Characters from the following Unicode blocks are converted; however, only
+       /// those characters with reasonable ASCII alternatives are converted:
+       /// 
+       /// <ul>
+       /// <li>C1 Controls and Latin-1 Supplement: <a href="http://www.unicode.org/charts/PDF/U0080.pdf">http://www.unicode.org/charts/PDF/U0080.pdf</a></li>
+    /// <li>Latin Extended-A: <a href="http://www.unicode.org/charts/PDF/U0100.pdf">http://www.unicode.org/charts/PDF/U0100.pdf</a></li>
+    /// <li>Latin Extended-B: <a href="http://www.unicode.org/charts/PDF/U0180.pdf">http://www.unicode.org/charts/PDF/U0180.pdf</a></li>
+    /// <li>Latin Extended Additional: <a href="http://www.unicode.org/charts/PDF/U1E00.pdf">http://www.unicode.org/charts/PDF/U1E00.pdf</a></li>
+    /// <li>Latin Extended-C: <a href="http://www.unicode.org/charts/PDF/U2C60.pdf">http://www.unicode.org/charts/PDF/U2C60.pdf</a></li>
+    /// <li>Latin Extended-D: <a href="http://www.unicode.org/charts/PDF/UA720.pdf">http://www.unicode.org/charts/PDF/UA720.pdf</a></li>
+    /// <li>IPA Extensions: <a href="http://www.unicode.org/charts/PDF/U0250.pdf">http://www.unicode.org/charts/PDF/U0250.pdf</a></li>
+    /// <li>Phonetic Extensions: <a href="http://www.unicode.org/charts/PDF/U1D00.pdf">http://www.unicode.org/charts/PDF/U1D00.pdf</a></li>
+    /// <li>Phonetic Extensions Supplement: <a href="http://www.unicode.org/charts/PDF/U1D80.pdf">http://www.unicode.org/charts/PDF/U1D80.pdf</a></li>
+    /// <li>General Punctuation: <a href="http://www.unicode.org/charts/PDF/U2000.pdf">http://www.unicode.org/charts/PDF/U2000.pdf</a></li>
+    /// <li>Superscripts and Subscripts: <a href="http://www.unicode.org/charts/PDF/U2070.pdf">http://www.unicode.org/charts/PDF/U2070.pdf</a></li>
+    /// <li>Enclosed Alphanumerics: <a href="http://www.unicode.org/charts/PDF/U2460.pdf">http://www.unicode.org/charts/PDF/U2460.pdf</a></li>
+    /// <li>Dingbats: <a href="http://www.unicode.org/charts/PDF/U2700.pdf">http://www.unicode.org/charts/PDF/U2700.pdf</a></li>
+    /// <li>Supplemental Punctuation: <a href="http://www.unicode.org/charts/PDF/U2E00.pdf">http://www.unicode.org/charts/PDF/U2E00.pdf</a></li>
+    /// <li>Alphabetic Presentation Forms: <a href="http://www.unicode.org/charts/PDF/UFB00.pdf">http://www.unicode.org/charts/PDF/UFB00.pdf</a></li>
+    /// <li>Halfwidth and Fullwidth Forms: <a href="http://www.unicode.org/charts/PDF/UFF00.pdf">http://www.unicode.org/charts/PDF/UFF00.pdf</a></li>
+       /// </ul>
+       /// 
+       /// See: <a href="http://en.wikipedia.org/wiki/Latin_characters_in_Unicode">http://en.wikipedia.org/wiki/Latin_characters_in_Unicode</a>
+       /// 
+       /// The set of character conversions supported by this class is a superset of
+       /// those supported by Lucene's {@link ISOLatin1AccentFilter} which strips
+       /// accents from Latin1 characters.  For example, '&#224;' will be replaced by
+       /// 'a'.
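+       /// 
+       /// A minimal usage sketch (assuming a System.IO.TextReader named
+       /// reader and a WhitespaceTokenizer as one possible source; it
+       /// mirrors the TermAttribute pattern used elsewhere in this file):
+       /// <code>
+       /// TokenStream ts = new ASCIIFoldingFilter(new WhitespaceTokenizer(reader));
+       /// TermAttribute term = (TermAttribute) ts.AddAttribute(typeof(TermAttribute));
+       /// while (ts.IncrementToken())
+       ///     System.Console.WriteLine(term.Term()); // e.g. folds "résumé" to "resume"
+       /// </code>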
+       /// </summary>
+       public sealed class ASCIIFoldingFilter:TokenFilter
+       {
+               public ASCIIFoldingFilter(TokenStream input):base(input)
+               {
+                       termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
+               }
+               
+               private char[] output = new char[512];
+               private int outputPos;
+               private TermAttribute termAtt;
+               
+               public override bool IncrementToken()
+               {
+                       if (input.IncrementToken())
+                       {
+                               char[] buffer = termAtt.TermBuffer();
+                               int length = termAtt.TermLength();
+                               
+                               // If no characters actually require rewriting then we
+                               // just return token as-is:
+                               for (int i = 0; i < length; ++i)
+                               {
+                                       char c = buffer[i];
+                                       if (c >= '\u0080')
+                                       {
+                                               FoldToASCII(buffer, length);
+                                               termAtt.SetTermBuffer(output, 0, outputPos);
+                                               break;
+                                       }
+                               }
+                               return true;
+                       }
+                       else
+                       {
+                               return false;
+                       }
+               }
+               
+               /// <summary> Converts characters above ASCII to their ASCII equivalents.  For example,
+               /// accents are removed from accented characters.
+               /// </summary>
+               /// <param name="input">The string to fold
+               /// </param>
+               /// <param name="length">The number of characters in the input string
+               /// </param>
+               public void  FoldToASCII(char[] input, int length)
+               {
+                       // Worst-case length required:
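+                       // (a single folded character can expand to a short ASCII
+                       // sequence - e.g. a parenthesized or fullwidth form - so
+                       // four output characters per input character is assumed
+                       // to be a safe upper bound here)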
+                       int maxSizeNeeded = 4 * length;
+                       if (output.Length < maxSizeNeeded)
+                       {
+                               output = new char[ArrayUtil.GetNextSize(maxSizeNeeded)];
+                       }
+                       
+                       outputPos = 0;
+                       
+                       for (int pos = 0; pos < length; ++pos)
+                       {
+                               char c = input[pos];
+                               
+                               // Quick test: if it's not in range then just keep current character
+                               if (c < '\u0080')
+                               {
+                                       output[outputPos++] = c;
+                               }
+                               else
+                               {
+                                       switch (c)
+                                       {
+                                               
+                                               case '\u00C0': 
+                                               // รƒฦ’รขโ€šยฌ  [LATIN CAPITAL LETTER A WITH GRAVE]
+                                               case '\u00C1': 
+                                               // รƒฦ’รฏยฟยฝ  [LATIN CAPITAL LETTER A WITH ACUTE]
+                                               case '\u00C2': 
+                                               // รƒฦ’รขโ‚ฌลก  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX]
+                                               case '\u00C3': 
+                                               // รƒฦ’ร†โ€™  [LATIN CAPITAL LETTER A WITH TILDE]
+                                               case '\u00C4': 
+                                               // รƒฦ’รขโ‚ฌลพ  [LATIN CAPITAL LETTER A WITH DIAERESIS]
+                                               case '\u00C5': 
+                                               // รƒฦ’รขโ‚ฌยฆ  [LATIN CAPITAL LETTER A WITH RING ABOVE]
+                                               case '\u0100': 
+                                               // รƒโ€žรขโ€šยฌ  [LATIN CAPITAL LETTER A WITH MACRON]
+                                               case '\u0102': 
+                                               // รƒโ€žรขโ‚ฌลก  [LATIN CAPITAL LETTER A WITH BREVE]
+                                               case '\u0104': 
+                                               // รƒโ€žรขโ‚ฌลพ  [LATIN CAPITAL LETTER A WITH OGONEK]
+                                               case '\u018F': 
+                                               // รƒโ€ รฏยฟยฝ  http://en.wikipedia.org/wiki/Schwa  [LATIN CAPITAL LETTER SCHWA]
+                                               case '\u01CD': 
+                                               // รƒโ€กรฏยฟยฝ  [LATIN CAPITAL LETTER A WITH CARON]
+                                               case '\u01DE': 
+                                               // รƒโ€กร…ยพ  [LATIN CAPITAL LETTER A WITH DIAERESIS AND MACRON]
+                                               case '\u01E0': 
+                                               // รƒโ€กร‚ย   [LATIN CAPITAL LETTER A WITH DOT ABOVE AND MACRON]
+                                               case '\u01FA': 
+                                               // รƒโ€กร‚ยบ  [LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE]
+                                               case '\u0200': 
+                                               // รƒห†รขโ€šยฌ  [LATIN CAPITAL LETTER A WITH DOUBLE GRAVE]
+                                               case '\u0202': 
+                                               // รƒห†รขโ‚ฌลก  [LATIN CAPITAL LETTER A WITH INVERTED BREVE]
+                                               case '\u0226': 
+                                               // รƒห†ร‚ยฆ  [LATIN CAPITAL LETTER A WITH DOT ABOVE]
+                                               case '\u023A': 
+                                               // รƒห†ร‚ยบ  [LATIN CAPITAL LETTER A WITH STROKE]
+                                               case '\u1D00': 
+                                               // รƒยกร‚ยดรขโ€šยฌ  [LATIN LETTER SMALL CAPITAL A]
+                                               case '\u1E00': 
+                                               // รƒยกร‚ยธรขโ€šยฌ  [LATIN CAPITAL LETTER A WITH RING BELOW]
+                                               case '\u1EA0': 
+                                               // รƒยกร‚ยบร‚ย   [LATIN CAPITAL LETTER A WITH DOT BELOW]
+                                               case '\u1EA2': 
+                                               // รƒยกร‚ยบร‚ยข  [LATIN CAPITAL LETTER A WITH HOOK ABOVE]
+                                               case '\u1EA4': 
+                                               // รƒยกร‚ยบร‚ยค  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND ACUTE]
+                                               case '\u1EA6': 
+                                               // รƒยกร‚ยบร‚ยฆ  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND GRAVE]
+                                               case '\u1EA8': 
+                                               // รƒยกร‚ยบร‚ยจ  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE]
+                                               case '\u1EAA': 
+                                               // รƒยกร‚ยบร‚ยช  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND TILDE]
+                                               case '\u1EAC': 
+                                               // รƒยกร‚ยบร‚ยฌ  [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND DOT BELOW]
+                                               case '\u1EAE': 
+                                               // รƒยกร‚ยบร‚ยฎ  [LATIN CAPITAL LETTER A WITH BREVE AND ACUTE]
+                                               case '\u1EB0': 
+                                               // รƒยกร‚ยบร‚ยฐ  [LATIN CAPITAL LETTER A WITH BREVE AND GRAVE]
+                                               case '\u1EB2': 
+                                               // รƒยกร‚ยบร‚ยฒ  [LATIN CAPITAL LETTER A WITH BREVE AND HOOK ABOVE]
+                                               case '\u1EB4': 
+                                               // รƒยกร‚ยบร‚ยด  [LATIN CAPITAL LETTER A WITH BREVE AND TILDE]
+                                               case '\u1EB6': 
+                                               // รƒยกร‚ยบร‚ยถ  [LATIN CAPITAL LETTER A WITH BREVE AND DOT BELOW]
+                                               case '\u24B6': 
+                                               // รƒยขรขโ‚ฌโ„ขร‚ยถ  [CIRCLED LATIN CAPITAL LETTER A]
+                                               case '\uFF21':  // รƒยฏร‚ยผร‚ยก  [FULLWIDTH LATIN CAPITAL LETTER A]
+                                                       output[outputPos++] = 'A';
+                                                       break;
+                                               
+                                               case '\u00E0':  // à  [LATIN SMALL LETTER A WITH GRAVE]
+                                               case '\u00E1':  // á  [LATIN SMALL LETTER A WITH ACUTE]
+                                               case '\u00E2':  // â  [LATIN SMALL LETTER A WITH CIRCUMFLEX]
+                                               case '\u00E3':  // ã  [LATIN SMALL LETTER A WITH TILDE]
+                                               case '\u00E4':  // ä  [LATIN SMALL LETTER A WITH DIAERESIS]
+                                               case '\u00E5':  // å  [LATIN SMALL LETTER A WITH RING ABOVE]
+                                               case '\u0101':  // ā  [LATIN SMALL LETTER A WITH MACRON]
+                                               case '\u0103':  // ă  [LATIN SMALL LETTER A WITH BREVE]
+                                               case '\u0105':  // ą  [LATIN SMALL LETTER A WITH OGONEK]
+                                               case '\u01CE':  // ǎ  [LATIN SMALL LETTER A WITH CARON]
+                                               case '\u01DF':  // ǟ  [LATIN SMALL LETTER A WITH DIAERESIS AND MACRON]
+                                               case '\u01E1':  // ǡ  [LATIN SMALL LETTER A WITH DOT ABOVE AND MACRON]
+                                               case '\u01FB':  // ǻ  [LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE]
+                                               case '\u0201':  // ȁ  [LATIN SMALL LETTER A WITH DOUBLE GRAVE]
+                                               case '\u0203':  // ȃ  [LATIN SMALL LETTER A WITH INVERTED BREVE]
+                                               case '\u0227':  // ȧ  [LATIN SMALL LETTER A WITH DOT ABOVE]
+                                               case '\u0250':  // ɐ  [LATIN SMALL LETTER TURNED A]
+                                               case '\u0259':  // ə  [LATIN SMALL LETTER SCHWA]
+                                               case '\u025A':  // ɚ  [LATIN SMALL LETTER SCHWA WITH HOOK]
+                                               case '\u1D8F':  // ᶏ  [LATIN SMALL LETTER A WITH RETROFLEX HOOK]
+                                               case '\u1D95':  // ᶕ  [LATIN SMALL LETTER SCHWA WITH RETROFLEX HOOK]
+                                               case '\u1E01':  // ḁ  [LATIN SMALL LETTER A WITH RING BELOW]
+                                               case '\u1E9A':  // ẚ  [LATIN SMALL LETTER A WITH RIGHT HALF RING]
+                                               case '\u1EA1':  // ạ  [LATIN SMALL LETTER A WITH DOT BELOW]
+                                               case '\u1EA3':  // ả  [LATIN SMALL LETTER A WITH HOOK ABOVE]
+                                               case '\u1EA5':  // ấ  [LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACUTE]
+                                               case '\u1EA7':  // ầ  [LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRAVE]
+                                               case '\u1EA9':  // ẩ  [LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE]
+                                               case '\u1EAB':  // ẫ  [LATIN SMALL LETTER A WITH CIRCUMFLEX AND TILDE]
+                                               case '\u1EAD':  // ậ  [LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT BELOW]
+                                               case '\u1EAF':  // ắ  [LATIN SMALL LETTER A WITH BREVE AND ACUTE]
+                                               case '\u1EB1':  // ằ  [LATIN SMALL LETTER A WITH BREVE AND GRAVE]
+                                               case '\u1EB3':  // ẳ  [LATIN SMALL LETTER A WITH BREVE AND HOOK ABOVE]
+                                               case '\u1EB5':  // ẵ  [LATIN SMALL LETTER A WITH BREVE AND TILDE]
+                                               case '\u1EB7':  // ặ  [LATIN SMALL LETTER A WITH BREVE AND DOT BELOW]
+                                               case '\u2090':  // ₐ  [LATIN SUBSCRIPT SMALL LETTER A]
+                                               case '\u2094':  // ₔ  [LATIN SUBSCRIPT SMALL LETTER SCHWA]
+                                               case '\u24D0':  // ⓐ  [CIRCLED LATIN SMALL LETTER A]
+                                               case '\u2C65':  // ⱥ  [LATIN SMALL LETTER A WITH STROKE]
+                                               case '\u2C6F':  // Ɐ  [LATIN CAPITAL LETTER TURNED A]
+                                               case '\uFF41':  // ａ  [FULLWIDTH LATIN SMALL LETTER A]
+                                                       output[outputPos++] = 'a';
+                                                       break;
+                                               
+                                               case '\uA732':  // Ꜳ  [LATIN CAPITAL LETTER AA]
+                                                       output[outputPos++] = 'A';
+                                                       output[outputPos++] = 'A';
+                                                       break;
+                                               
+                                               case '\u00C6':  // Æ  [LATIN CAPITAL LETTER AE]
+                                               case '\u01E2':  // Ǣ  [LATIN CAPITAL LETTER AE WITH MACRON]
+                                               case '\u01FC':  // Ǽ  [LATIN CAPITAL LETTER AE WITH ACUTE]
+                                               case '\u1D01':  // ᴁ  [LATIN LETTER SMALL CAPITAL AE]
+                                                       output[outputPos++] = 'A';
+                                                       output[outputPos++] = 'E';
+                                                       break;
+                                               
+                                               case '\uA734':  // Ꜵ  [LATIN CAPITAL LETTER AO]
+                                                       output[outputPos++] = 'A';
+                                                       output[outputPos++] = 'O';
+                                                       break;
+                                               
+                                               case '\uA736':  // Ꜷ  [LATIN CAPITAL LETTER AU]
+                                                       output[outputPos++] = 'A';
+                                                       output[outputPos++] = 'U';
+                                                       break;
+                                               
+                                               case '\uA738':  // Ꜹ  [LATIN CAPITAL LETTER AV]
+                                               case '\uA73A':  // Ꜻ  [LATIN CAPITAL LETTER AV WITH HORIZONTAL BAR]
+                                                       output[outputPos++] = 'A';
+                                                       output[outputPos++] = 'V';
+                                                       break;
+                                               
+                                               case '\uA73C':  // Ꜽ  [LATIN CAPITAL LETTER AY]
+                                                       output[outputPos++] = 'A';
+                                                       output[outputPos++] = 'Y';
+                                                       break;
+                                               
+                                               case '\u249C':  // ⒜  [PARENTHESIZED LATIN SMALL LETTER A]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'a';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\uA733':  // ꜳ  [LATIN SMALL LETTER AA]
+                                                       output[outputPos++] = 'a';
+                                                       output[outputPos++] = 'a';
+                                                       break;
+                                               
+                                               case '\u00E6':  // æ  [LATIN SMALL LETTER AE]
+                                               case '\u01E3':  // ǣ  [LATIN SMALL LETTER AE WITH MACRON]
+                                               case '\u01FD':  // ǽ  [LATIN SMALL LETTER AE WITH ACUTE]
+                                               case '\u1D02':  // ᴂ  [LATIN SMALL LETTER TURNED AE]
+                                                       output[outputPos++] = 'a';
+                                                       output[outputPos++] = 'e';
+                                                       break;
+                                               
+                                               case '\uA735':  // ꜵ  [LATIN SMALL LETTER AO]
+                                                       output[outputPos++] = 'a';
+                                                       output[outputPos++] = 'o';
+                                                       break;
+                                               
+                                               case '\uA737':  // ꜷ  [LATIN SMALL LETTER AU]
+                                                       output[outputPos++] = 'a';
+                                                       output[outputPos++] = 'u';
+                                                       break;
+                                               
+                                               case '\uA739':  // ꜹ  [LATIN SMALL LETTER AV]
+                                               case '\uA73B':  // ꜻ  [LATIN SMALL LETTER AV WITH HORIZONTAL BAR]
+                                                       output[outputPos++] = 'a';
+                                                       output[outputPos++] = 'v';
+                                                       break;
+                                               
+                                               case '\uA73D':  // ꜽ  [LATIN SMALL LETTER AY]
+                                                       output[outputPos++] = 'a';
+                                                       output[outputPos++] = 'y';
+                                                       break;
+                                               
+                                               case '\u0181':  // Ɓ  [LATIN CAPITAL LETTER B WITH HOOK]
+                                               case '\u0182':  // Ƃ  [LATIN CAPITAL LETTER B WITH TOPBAR]
+                                               case '\u0243':  // Ƀ  [LATIN CAPITAL LETTER B WITH STROKE]
+                                               case '\u0299':  // ʙ  [LATIN LETTER SMALL CAPITAL B]
+                                               case '\u1D03':  // ᴃ  [LATIN LETTER SMALL CAPITAL BARRED B]
+                                               case '\u1E02':  // Ḃ  [LATIN CAPITAL LETTER B WITH DOT ABOVE]
+                                               case '\u1E04':  // Ḅ  [LATIN CAPITAL LETTER B WITH DOT BELOW]
+                                               case '\u1E06':  // Ḇ  [LATIN CAPITAL LETTER B WITH LINE BELOW]
+                                               case '\u24B7':  // Ⓑ  [CIRCLED LATIN CAPITAL LETTER B]
+                                               case '\uFF22':  // Ｂ  [FULLWIDTH LATIN CAPITAL LETTER B]
+                                                       output[outputPos++] = 'B';
+                                                       break;
+                                               
+                                               case '\u0180':  // ƀ  [LATIN SMALL LETTER B WITH STROKE]
+                                               case '\u0183':  // ƃ  [LATIN SMALL LETTER B WITH TOPBAR]
+                                               case '\u0253':  // ɓ  [LATIN SMALL LETTER B WITH HOOK]
+                                               case '\u1D6C':  // ᵬ  [LATIN SMALL LETTER B WITH MIDDLE TILDE]
+                                               case '\u1D80':  // ᶀ  [LATIN SMALL LETTER B WITH PALATAL HOOK]
+                                               case '\u1E03':  // ḃ  [LATIN SMALL LETTER B WITH DOT ABOVE]
+                                               case '\u1E05':  // ḅ  [LATIN SMALL LETTER B WITH DOT BELOW]
+                                               case '\u1E07':  // ḇ  [LATIN SMALL LETTER B WITH LINE BELOW]
+                                               case '\u24D1':  // ⓑ  [CIRCLED LATIN SMALL LETTER B]
+                                               case '\uFF42':  // ｂ  [FULLWIDTH LATIN SMALL LETTER B]
+                                                       output[outputPos++] = 'b';
+                                                       break;
+                                               
+                                               case '\u249D':  // ⒝  [PARENTHESIZED LATIN SMALL LETTER B]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'b';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u00C7':  // Ç  [LATIN CAPITAL LETTER C WITH CEDILLA]
+                                               case '\u0106':  // Ć  [LATIN CAPITAL LETTER C WITH ACUTE]
+                                               case '\u0108':  // Ĉ  [LATIN CAPITAL LETTER C WITH CIRCUMFLEX]
+                                               case '\u010A':  // Ċ  [LATIN CAPITAL LETTER C WITH DOT ABOVE]
+                                               case '\u010C':  // Č  [LATIN CAPITAL LETTER C WITH CARON]
+                                               case '\u0187':  // Ƈ  [LATIN CAPITAL LETTER C WITH HOOK]
+                                               case '\u023B':  // Ȼ  [LATIN CAPITAL LETTER C WITH STROKE]
+                                               case '\u0297':  // ʗ  [LATIN LETTER STRETCHED C]
+                                               case '\u1D04':  // ᴄ  [LATIN LETTER SMALL CAPITAL C]
+                                               case '\u1E08':  // Ḉ  [LATIN CAPITAL LETTER C WITH CEDILLA AND ACUTE]
+                                               case '\u24B8':  // Ⓒ  [CIRCLED LATIN CAPITAL LETTER C]
+                                               case '\uFF23':  // Ｃ  [FULLWIDTH LATIN CAPITAL LETTER C]
+                                                       output[outputPos++] = 'C';
+                                                       break;
+                                               
+                                               case '\u00E7':  // ç  [LATIN SMALL LETTER C WITH CEDILLA]
+                                               case '\u0107':  // ć  [LATIN SMALL LETTER C WITH ACUTE]
+                                               case '\u0109':  // ĉ  [LATIN SMALL LETTER C WITH CIRCUMFLEX]
+                                               case '\u010B':  // ċ  [LATIN SMALL LETTER C WITH DOT ABOVE]
+                                               case '\u010D':  // č  [LATIN SMALL LETTER C WITH CARON]
+                                               case '\u0188':  // ƈ  [LATIN SMALL LETTER C WITH HOOK]
+                                               case '\u023C':  // ȼ  [LATIN SMALL LETTER C WITH STROKE]
+                                               case '\u0255':  // ɕ  [LATIN SMALL LETTER C WITH CURL]
+                                               case '\u1E09':  // ḉ  [LATIN SMALL LETTER C WITH CEDILLA AND ACUTE]
+                                               case '\u2184':  // ↄ  [LATIN SMALL LETTER REVERSED C]
+                                               case '\u24D2':  // ⓒ  [CIRCLED LATIN SMALL LETTER C]
+                                               case '\uA73E':  // Ꜿ  [LATIN CAPITAL LETTER REVERSED C WITH DOT]
+                                               case '\uA73F':  // ꜿ  [LATIN SMALL LETTER REVERSED C WITH DOT]
+                                               case '\uFF43':  // ｃ  [FULLWIDTH LATIN SMALL LETTER C]
+                                                       output[outputPos++] = 'c';
+                                                       break;
+                                               
+                                               case '\u249E':  // ⒞  [PARENTHESIZED LATIN SMALL LETTER C]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'c';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u00D0':  // Ð  [LATIN CAPITAL LETTER ETH]
+                                               case '\u010E':  // Ď  [LATIN CAPITAL LETTER D WITH CARON]
+                                               case '\u0110':  // Đ  [LATIN CAPITAL LETTER D WITH STROKE]
+                                               case '\u0189':  // Ɖ  [LATIN CAPITAL LETTER AFRICAN D]
+                                               case '\u018A':  // Ɗ  [LATIN CAPITAL LETTER D WITH HOOK]
+                                               case '\u018B':  // Ƌ  [LATIN CAPITAL LETTER D WITH TOPBAR]
+                                               case '\u1D05':  // ᴅ  [LATIN LETTER SMALL CAPITAL D]
+                                               case '\u1D06':  // ᴆ  [LATIN LETTER SMALL CAPITAL ETH]
+                                               case '\u1E0A':  // Ḋ  [LATIN CAPITAL LETTER D WITH DOT ABOVE]
+                                               case '\u1E0C':  // Ḍ  [LATIN CAPITAL LETTER D WITH DOT BELOW]
+                                               case '\u1E0E':  // Ḏ  [LATIN CAPITAL LETTER D WITH LINE BELOW]
+                                               case '\u1E10':  // Ḑ  [LATIN CAPITAL LETTER D WITH CEDILLA]
+                                               case '\u1E12':  // Ḓ  [LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW]
+                                               case '\u24B9':  // Ⓓ  [CIRCLED LATIN CAPITAL LETTER D]
+                                               case '\uA779':  // Ꝺ  [LATIN CAPITAL LETTER INSULAR D]
+                                               case '\uFF24':  // Ｄ  [FULLWIDTH LATIN CAPITAL LETTER D]
+                                                       output[outputPos++] = 'D';
+                                                       break;
+                                               
+                                               case '\u00F0':  // ð  [LATIN SMALL LETTER ETH]
+                                               case '\u010F':  // ď  [LATIN SMALL LETTER D WITH CARON]
+                                               case '\u0111':  // đ  [LATIN SMALL LETTER D WITH STROKE]
+                                               case '\u018C':  // ƌ  [LATIN SMALL LETTER D WITH TOPBAR]
+                                               case '\u0221':  // ȡ  [LATIN SMALL LETTER D WITH CURL]
+                                               case '\u0256':  // ɖ  [LATIN SMALL LETTER D WITH TAIL]
+                                               case '\u0257':  // ɗ  [LATIN SMALL LETTER D WITH HOOK]
+                                               case '\u1D6D':  // ᵭ  [LATIN SMALL LETTER D WITH MIDDLE TILDE]
+                                               case '\u1D81':  // ᶁ  [LATIN SMALL LETTER D WITH PALATAL HOOK]
+                                               case '\u1D91':  // ᶑ  [LATIN SMALL LETTER D WITH HOOK AND TAIL]
+                                               case '\u1E0B':  // ḋ  [LATIN SMALL LETTER D WITH DOT ABOVE]
+                                               case '\u1E0D':  // ḍ  [LATIN SMALL LETTER D WITH DOT BELOW]
+                                               case '\u1E0F':  // ḏ  [LATIN SMALL LETTER D WITH LINE BELOW]
+                                               case '\u1E11':  // ḑ  [LATIN SMALL LETTER D WITH CEDILLA]
+                                               case '\u1E13':  // ḓ  [LATIN SMALL LETTER D WITH CIRCUMFLEX BELOW]
+                                               case '\u24D3':  // ⓓ  [CIRCLED LATIN SMALL LETTER D]
+                                               case '\uA77A':  // ꝺ  [LATIN SMALL LETTER INSULAR D]
+                                               case '\uFF44':  // ｄ  [FULLWIDTH LATIN SMALL LETTER D]
+                                                       output[outputPos++] = 'd';
+                                                       break;
+                                               
+                                               case '\u01C4':  // Ǆ  [LATIN CAPITAL LETTER DZ WITH CARON]
+                                               case '\u01F1':  // Ǳ  [LATIN CAPITAL LETTER DZ]
+                                                       output[outputPos++] = 'D';
+                                                       output[outputPos++] = 'Z';
+                                                       break;
+                                               
+                                               case '\u01C5':  // ǅ  [LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON]
+                                               case '\u01F2':  // ǲ  [LATIN CAPITAL LETTER D WITH SMALL LETTER Z]
+                                                       output[outputPos++] = 'D';
+                                                       output[outputPos++] = 'z';
+                                                       break;
+                                               
+                                               case '\u249F':  // ⒟  [PARENTHESIZED LATIN SMALL LETTER D]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'd';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u0238':  // ȸ  [LATIN SMALL LETTER DB DIGRAPH]
+                                                       output[outputPos++] = 'd';
+                                                       output[outputPos++] = 'b';
+                                                       break;
+                                               
+                                               case '\u01C6':  // ǆ  [LATIN SMALL LETTER DZ WITH CARON]
+                                               case '\u01F3':  // ǳ  [LATIN SMALL LETTER DZ]
+                                               case '\u02A3':  // ʣ  [LATIN SMALL LETTER DZ DIGRAPH]
+                                               case '\u02A5':  // ʥ  [LATIN SMALL LETTER DZ DIGRAPH WITH CURL]
+                                                       output[outputPos++] = 'd';
+                                                       output[outputPos++] = 'z';
+                                                       break;
+                                               
+                                               case '\u00C8':  // È  [LATIN CAPITAL LETTER E WITH GRAVE]
+                                               case '\u00C9':  // É  [LATIN CAPITAL LETTER E WITH ACUTE]
+                                               case '\u00CA':  // Ê  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX]
+                                               case '\u00CB':  // Ë  [LATIN CAPITAL LETTER E WITH DIAERESIS]
+                                               case '\u0112':  // Ē  [LATIN CAPITAL LETTER E WITH MACRON]
+                                               case '\u0114':  // Ĕ  [LATIN CAPITAL LETTER E WITH BREVE]
+                                               case '\u0116':  // Ė  [LATIN CAPITAL LETTER E WITH DOT ABOVE]
+                                               case '\u0118':  // Ę  [LATIN CAPITAL LETTER E WITH OGONEK]
+                                               case '\u011A':  // Ě  [LATIN CAPITAL LETTER E WITH CARON]
+                                               case '\u018E':  // Ǝ  [LATIN CAPITAL LETTER REVERSED E]
+                                               case '\u0190':  // Ɛ  [LATIN CAPITAL LETTER OPEN E]
+                                               case '\u0204':  // Ȅ  [LATIN CAPITAL LETTER E WITH DOUBLE GRAVE]
+                                               case '\u0206':  // Ȇ  [LATIN CAPITAL LETTER E WITH INVERTED BREVE]
+                                               case '\u0228':  // Ȩ  [LATIN CAPITAL LETTER E WITH CEDILLA]
+                                               case '\u0246':  // Ɇ  [LATIN CAPITAL LETTER E WITH STROKE]
+                                               case '\u1D07':  // ᴇ  [LATIN LETTER SMALL CAPITAL E]
+                                               case '\u1E14':  // Ḕ  [LATIN CAPITAL LETTER E WITH MACRON AND GRAVE]
+                                               case '\u1E16':  // Ḗ  [LATIN CAPITAL LETTER E WITH MACRON AND ACUTE]
+                                               case '\u1E18':  // Ḙ  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW]
+                                               case '\u1E1A':  // Ḛ  [LATIN CAPITAL LETTER E WITH TILDE BELOW]
+                                               case '\u1E1C':  // Ḝ  [LATIN CAPITAL LETTER E WITH CEDILLA AND BREVE]
+                                               case '\u1EB8':  // Ẹ  [LATIN CAPITAL LETTER E WITH DOT BELOW]
+                                               case '\u1EBA':  // Ẻ  [LATIN CAPITAL LETTER E WITH HOOK ABOVE]
+                                               case '\u1EBC':  // Ẽ  [LATIN CAPITAL LETTER E WITH TILDE]
+                                               case '\u1EBE':  // Ế  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND ACUTE]
+                                               case '\u1EC0':  // Ề  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND GRAVE]
+                                               case '\u1EC2':  // Ể  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE]
+                                               case '\u1EC4':  // Ễ  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND TILDE]
+                                               case '\u1EC6':  // Ệ  [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND DOT BELOW]
+                                               case '\u24BA':  // Ⓔ  [CIRCLED LATIN CAPITAL LETTER E]
+                                               case '\u2C7B':  // ⱻ  [LATIN LETTER SMALL CAPITAL TURNED E]
+                                               case '\uFF25':  // Ｅ  [FULLWIDTH LATIN CAPITAL LETTER E]
+                                                       output[outputPos++] = 'E';
+                                                       break;
+                                               
+                                               case '\u00E8':  // è  [LATIN SMALL LETTER E WITH GRAVE]
+                                               case '\u00E9':  // é  [LATIN SMALL LETTER E WITH ACUTE]
+                                               case '\u00EA':  // ê  [LATIN SMALL LETTER E WITH CIRCUMFLEX]
+                                               case '\u00EB':  // ë  [LATIN SMALL LETTER E WITH DIAERESIS]
+                                               case '\u0113':  // ē  [LATIN SMALL LETTER E WITH MACRON]
+                                               case '\u0115':  // ĕ  [LATIN SMALL LETTER E WITH BREVE]
+                                               case '\u0117':  // ė  [LATIN SMALL LETTER E WITH DOT ABOVE]
+                                               case '\u0119':  // ę  [LATIN SMALL LETTER E WITH OGONEK]
+                                               case '\u011B':  // ě  [LATIN SMALL LETTER E WITH CARON]
+                                               case '\u01DD':  // ǝ  [LATIN SMALL LETTER TURNED E]
+                                               case '\u0205':  // ȅ  [LATIN SMALL LETTER E WITH DOUBLE GRAVE]
+                                               case '\u0207':  // ȇ  [LATIN SMALL LETTER E WITH INVERTED BREVE]
+                                               case '\u0229':  // ȩ  [LATIN SMALL LETTER E WITH CEDILLA]
+                                               case '\u0247':  // ɇ  [LATIN SMALL LETTER E WITH STROKE]
+                                               case '\u0258':  // ɘ  [LATIN SMALL LETTER REVERSED E]
+                                               case '\u025B':  // ɛ  [LATIN SMALL LETTER OPEN E]
+                                               case '\u025C':  // ɜ  [LATIN SMALL LETTER REVERSED OPEN E]
+                                               case '\u025D':  // ɝ  [LATIN SMALL LETTER REVERSED OPEN E WITH HOOK]
+                                               case '\u025E':  // ɞ  [LATIN SMALL LETTER CLOSED REVERSED OPEN E]
+                                               case '\u029A':  // ʚ  [LATIN SMALL LETTER CLOSED OPEN E]
+                                               case '\u1D08':  // ᴈ  [LATIN SMALL LETTER TURNED OPEN E]
+                                               case '\u1D92':  // ᶒ  [LATIN SMALL LETTER E WITH RETROFLEX HOOK]
+                                               case '\u1D93':  // ᶓ  [LATIN SMALL LETTER OPEN E WITH RETROFLEX HOOK]
+                                               case '\u1D94':  // ᶔ  [LATIN SMALL LETTER REVERSED OPEN E WITH RETROFLEX HOOK]
+                                               case '\u1E15':  // ḕ  [LATIN SMALL LETTER E WITH MACRON AND GRAVE]
+                                               case '\u1E17':  // ḗ  [LATIN SMALL LETTER E WITH MACRON AND ACUTE]
+                                               case '\u1E19':  // ḙ  [LATIN SMALL LETTER E WITH CIRCUMFLEX BELOW]
+                                               case '\u1E1B':  // ḛ  [LATIN SMALL LETTER E WITH TILDE BELOW]
+                                               case '\u1E1D':  // ḝ  [LATIN SMALL LETTER E WITH CEDILLA AND BREVE]
+                                               case '\u1EB9':  // ẹ  [LATIN SMALL LETTER E WITH DOT BELOW]
+                                               case '\u1EBB':  // ẻ  [LATIN SMALL LETTER E WITH HOOK ABOVE]
+                                               case '\u1EBD':  // ẽ  [LATIN SMALL LETTER E WITH TILDE]
+                                               case '\u1EBF':  // ế  [LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACUTE]
+                                               case '\u1EC1':  // ề  [LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRAVE]
+                                               case '\u1EC3':  // ể  [LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE]
+                                               case '\u1EC5':  // ễ  [LATIN SMALL LETTER E WITH CIRCUMFLEX AND TILDE]
+                                               case '\u1EC7':  // ệ  [LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT BELOW]
+                                               case '\u2091':  // ₑ  [LATIN SUBSCRIPT SMALL LETTER E]
+                                               case '\u24D4':  // ⓔ  [CIRCLED LATIN SMALL LETTER E]
+                                               case '\u2C78':  // ⱸ  [LATIN SMALL LETTER E WITH NOTCH]
+                                               case '\uFF45':  // ｅ  [FULLWIDTH LATIN SMALL LETTER E]
+                                                       output[outputPos++] = 'e';
+                                                       break;
+                                               
+                                               case '\u24A0':  // ⒠  [PARENTHESIZED LATIN SMALL LETTER E]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'e';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u0191':  // Ƒ  [LATIN CAPITAL LETTER F WITH HOOK]
+                                               case '\u1E1E':  // Ḟ  [LATIN CAPITAL LETTER F WITH DOT ABOVE]
+                                               case '\u24BB':  // Ⓕ  [CIRCLED LATIN CAPITAL LETTER F]
+                                               case '\uA730':  // ꜰ  [LATIN LETTER SMALL CAPITAL F]
+                                               case '\uA77B':  // Ꝼ  [LATIN CAPITAL LETTER INSULAR F]
+                                               case '\uA7FB':  // ꟻ  [LATIN EPIGRAPHIC LETTER REVERSED F]
+                                               case '\uFF26':  // Ｆ  [FULLWIDTH LATIN CAPITAL LETTER F]
+                                                       output[outputPos++] = 'F';
+                                                       break;
+                                               
+                                               case '\u0192':  // ƒ  [LATIN SMALL LETTER F WITH HOOK]
+                                               case '\u1D6E':  // ᵮ  [LATIN SMALL LETTER F WITH MIDDLE TILDE]
+                                               case '\u1D82':  // ᶂ  [LATIN SMALL LETTER F WITH PALATAL HOOK]
+                                               case '\u1E1F':  // ḟ  [LATIN SMALL LETTER F WITH DOT ABOVE]
+                                               case '\u1E9B':  // ẛ  [LATIN SMALL LETTER LONG S WITH DOT ABOVE]
+                                               case '\u24D5':  // ⓕ  [CIRCLED LATIN SMALL LETTER F]
+                                               case '\uA77C':  // ꝼ  [LATIN SMALL LETTER INSULAR F]
+                                               case '\uFF46':  // ｆ  [FULLWIDTH LATIN SMALL LETTER F]
+                                                       output[outputPos++] = 'f';
+                                                       break;
+                                               
+                                               case '\u24A1':  // ⒡  [PARENTHESIZED LATIN SMALL LETTER F]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'f';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\uFB00':  // ﬀ  [LATIN SMALL LIGATURE FF]
+                                                       output[outputPos++] = 'f';
+                                                       output[outputPos++] = 'f';
+                                                       break;
+                                               
+                                               case '\uFB03':  // ﬃ  [LATIN SMALL LIGATURE FFI]
+                                                       output[outputPos++] = 'f';
+                                                       output[outputPos++] = 'f';
+                                                       output[outputPos++] = 'i';
+                                                       break;
+                                               
+                                               case '\uFB04':  // ﬄ  [LATIN SMALL LIGATURE FFL]
+                                                       output[outputPos++] = 'f';
+                                                       output[outputPos++] = 'f';
+                                                       output[outputPos++] = 'l';
+                                                       break;
+                                               
+                                               case '\uFB01':  // ﬁ  [LATIN SMALL LIGATURE FI]
+                                                       output[outputPos++] = 'f';
+                                                       output[outputPos++] = 'i';
+                                                       break;
+                                               
+                                               case '\uFB02':  // ﬂ  [LATIN SMALL LIGATURE FL]
+                                                       output[outputPos++] = 'f';
+                                                       output[outputPos++] = 'l';
+                                                       break;
+                                               
+                                               case '\u011C':  // Ĝ  [LATIN CAPITAL LETTER G WITH CIRCUMFLEX]
+                                               case '\u011E':  // Ğ  [LATIN CAPITAL LETTER G WITH BREVE]
+                                               case '\u0120':  // Ġ  [LATIN CAPITAL LETTER G WITH DOT ABOVE]
+                                               case '\u0122':  // Ģ  [LATIN CAPITAL LETTER G WITH CEDILLA]
+                                               case '\u0193':  // Ɠ  [LATIN CAPITAL LETTER G WITH HOOK]
+                                               case '\u01E4':  // Ǥ  [LATIN CAPITAL LETTER G WITH STROKE]
+                                               case '\u01E5':  // ǥ  [LATIN SMALL LETTER G WITH STROKE]
+                                               case '\u01E6':  // Ǧ  [LATIN CAPITAL LETTER G WITH CARON]
+                                               case '\u01E7':  // ǧ  [LATIN SMALL LETTER G WITH CARON]
+                                               case '\u01F4':  // Ǵ  [LATIN CAPITAL LETTER G WITH ACUTE]
+                                               case '\u0262':  // ɢ  [LATIN LETTER SMALL CAPITAL G]
+                                               case '\u029B':  // ʛ  [LATIN LETTER SMALL CAPITAL G WITH HOOK]
+                                               case '\u1E20':  // Ḡ  [LATIN CAPITAL LETTER G WITH MACRON]
+                                               case '\u24BC':  // Ⓖ  [CIRCLED LATIN CAPITAL LETTER G]
+                                               case '\uA77D':  // Ᵹ  [LATIN CAPITAL LETTER INSULAR G]
+                                               case '\uA77E':  // Ꝿ  [LATIN CAPITAL LETTER TURNED INSULAR G]
+                                               case '\uFF27':  // Ｇ  [FULLWIDTH LATIN CAPITAL LETTER G]
+                                                       output[outputPos++] = 'G';
+                                                       break;
+                                               
+                                               case '\u011D':  // ĝ  [LATIN SMALL LETTER G WITH CIRCUMFLEX]
+                                               case '\u011F':  // ğ  [LATIN SMALL LETTER G WITH BREVE]
+                                               case '\u0121':  // ġ  [LATIN SMALL LETTER G WITH DOT ABOVE]
+                                               case '\u0123':  // ģ  [LATIN SMALL LETTER G WITH CEDILLA]
+                                               case '\u01F5':  // ǵ  [LATIN SMALL LETTER G WITH ACUTE]
+                                               case '\u0260':  // ɠ  [LATIN SMALL LETTER G WITH HOOK]
+                                               case '\u0261':  // ɡ  [LATIN SMALL LETTER SCRIPT G]
+                                               case '\u1D77':  // ᵷ  [LATIN SMALL LETTER TURNED G]
+                                               case '\u1D79':  // ᵹ  [LATIN SMALL LETTER INSULAR G]
+                                               case '\u1D83':  // ᶃ  [LATIN SMALL LETTER G WITH PALATAL HOOK]
+                                               case '\u1E21':  // ḡ  [LATIN SMALL LETTER G WITH MACRON]
+                                               case '\u24D6':  // ⓖ  [CIRCLED LATIN SMALL LETTER G]
+                                               case '\uA77F':  // ꝿ  [LATIN SMALL LETTER TURNED INSULAR G]
+                                               case '\uFF47':  // ｇ  [FULLWIDTH LATIN SMALL LETTER G]
+                                                       output[outputPos++] = 'g';
+                                                       break;
+                                               
+                                               case '\u24A2':  // ⒢  [PARENTHESIZED LATIN SMALL LETTER G]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'g';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u0124':  // Ĥ  [LATIN CAPITAL LETTER H WITH CIRCUMFLEX]
+                                               case '\u0126':  // Ħ  [LATIN CAPITAL LETTER H WITH STROKE]
+                                               case '\u021E':  // Ȟ  [LATIN CAPITAL LETTER H WITH CARON]
+                                               case '\u029C':  // ʜ  [LATIN LETTER SMALL CAPITAL H]
+                                               case '\u1E22':  // Ḣ  [LATIN CAPITAL LETTER H WITH DOT ABOVE]
+                                               case '\u1E24':  // Ḥ  [LATIN CAPITAL LETTER H WITH DOT BELOW]
+                                               case '\u1E26':  // Ḧ  [LATIN CAPITAL LETTER H WITH DIAERESIS]
+                                               case '\u1E28':  // Ḩ  [LATIN CAPITAL LETTER H WITH CEDILLA]
+                                               case '\u1E2A':  // Ḫ  [LATIN CAPITAL LETTER H WITH BREVE BELOW]
+                                               case '\u24BD':  // Ⓗ  [CIRCLED LATIN CAPITAL LETTER H]
+                                               case '\u2C67':  // Ⱨ  [LATIN CAPITAL LETTER H WITH DESCENDER]
+                                               case '\u2C75':  // Ⱶ  [LATIN CAPITAL LETTER HALF H]
+                                               case '\uFF28':  // Ｈ  [FULLWIDTH LATIN CAPITAL LETTER H]
+                                                       output[outputPos++] = 'H';
+                                                       break;
+                                               
+                                               case '\u0125':  // ĥ  [LATIN SMALL LETTER H WITH CIRCUMFLEX]
+                                               case '\u0127':  // ħ  [LATIN SMALL LETTER H WITH STROKE]
+                                               case '\u021F':  // ȟ  [LATIN SMALL LETTER H WITH CARON]
+                                               case '\u0265':  // ɥ  [LATIN SMALL LETTER TURNED H]
+                                               case '\u0266':  // ɦ  [LATIN SMALL LETTER H WITH HOOK]
+                                               case '\u02AE':  // ʮ  [LATIN SMALL LETTER TURNED H WITH FISHHOOK]
+                                               case '\u02AF':  // ʯ  [LATIN SMALL LETTER TURNED H WITH FISHHOOK AND TAIL]
+                                               case '\u1E23':  // ḣ  [LATIN SMALL LETTER H WITH DOT ABOVE]
+                                               case '\u1E25':  // ḥ  [LATIN SMALL LETTER H WITH DOT BELOW]
+                                               case '\u1E27':  // ḧ  [LATIN SMALL LETTER H WITH DIAERESIS]
+                                               case '\u1E29':  // ḩ  [LATIN SMALL LETTER H WITH CEDILLA]
+                                               case '\u1E2B':  // ḫ  [LATIN SMALL LETTER H WITH BREVE BELOW]
+                                               case '\u1E96':  // ẖ  [LATIN SMALL LETTER H WITH LINE BELOW]
+                                               case '\u24D7':  // ⓗ  [CIRCLED LATIN SMALL LETTER H]
+                                               case '\u2C68':  // ⱨ  [LATIN SMALL LETTER H WITH DESCENDER]
+                                               case '\u2C76':  // ⱶ  [LATIN SMALL LETTER HALF H]
+                                               case '\uFF48':  // ｈ  [FULLWIDTH LATIN SMALL LETTER H]
+                                                       output[outputPos++] = 'h';
+                                                       break;
+                                               
+                                               case '\u01F6':  // Ƕ  http://en.wikipedia.org/wiki/Hwair  [LATIN CAPITAL LETTER HWAIR]
+                                                       output[outputPos++] = 'H';
+                                                       output[outputPos++] = 'V';
+                                                       break;
+                                               
+                                               case '\u24A3':  // ⒣  [PARENTHESIZED LATIN SMALL LETTER H]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'h';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u0195':  // ƕ  [LATIN SMALL LETTER HV]
+                                                       output[outputPos++] = 'h';
+                                                       output[outputPos++] = 'v';
+                                                       break;
+                                               
+                                               case '\u00CC':  // Ì  [LATIN CAPITAL LETTER I WITH GRAVE]
+                                               case '\u00CD':  // Í  [LATIN CAPITAL LETTER I WITH ACUTE]
+                                               case '\u00CE':  // Î  [LATIN CAPITAL LETTER I WITH CIRCUMFLEX]
+                                               case '\u00CF':  // Ï  [LATIN CAPITAL LETTER I WITH DIAERESIS]
+                                               case '\u0128':  // Ĩ  [LATIN CAPITAL LETTER I WITH TILDE]
+                                               case '\u012A':  // Ī  [LATIN CAPITAL LETTER I WITH MACRON]
+                                               case '\u012C':  // Ĭ  [LATIN CAPITAL LETTER I WITH BREVE]
+                                               case '\u012E':  // Į  [LATIN CAPITAL LETTER I WITH OGONEK]
+                                               case '\u0130':  // İ  [LATIN CAPITAL LETTER I WITH DOT ABOVE]
+                                               case '\u0196':  // Ɩ  [LATIN CAPITAL LETTER IOTA]
+                                               case '\u0197':  // Ɨ  [LATIN CAPITAL LETTER I WITH STROKE]
+                                               case '\u01CF':  // Ǐ  [LATIN CAPITAL LETTER I WITH CARON]
+                                               case '\u0208':  // Ȉ  [LATIN CAPITAL LETTER I WITH DOUBLE GRAVE]
+                                               case '\u020A':  // Ȋ  [LATIN CAPITAL LETTER I WITH INVERTED BREVE]
+                                               case '\u026A':  // ɪ  [LATIN LETTER SMALL CAPITAL I]
+                                               case '\u1D7B':  // ᵻ  [LATIN SMALL CAPITAL LETTER I WITH STROKE]
+                                               case '\u1E2C':  // Ḭ  [LATIN CAPITAL LETTER I WITH TILDE BELOW]
+                                               case '\u1E2E':  // Ḯ  [LATIN CAPITAL LETTER I WITH DIAERESIS AND ACUTE]
+                                               case '\u1EC8':  // Ỉ  [LATIN CAPITAL LETTER I WITH HOOK ABOVE]
+                                               case '\u1ECA':  // Ị  [LATIN CAPITAL LETTER I WITH DOT BELOW]
+                                               case '\u24BE':  // Ⓘ  [CIRCLED LATIN CAPITAL LETTER I]
+                                               case '\uA7FE':  // ꟾ  [LATIN EPIGRAPHIC LETTER I LONGA]
+                                               case '\uFF29':  // Ｉ  [FULLWIDTH LATIN CAPITAL LETTER I]
+                                                       output[outputPos++] = 'I';
+                                                       break;
+                                               
+                                               case '\u00EC':  // ì  [LATIN SMALL LETTER I WITH GRAVE]
+                                               case '\u00ED':  // í  [LATIN SMALL LETTER I WITH ACUTE]
+                                               case '\u00EE':  // î  [LATIN SMALL LETTER I WITH CIRCUMFLEX]
+                                               case '\u00EF':  // ï  [LATIN SMALL LETTER I WITH DIAERESIS]
+                                               case '\u0129':  // ĩ  [LATIN SMALL LETTER I WITH TILDE]
+                                               case '\u012B':  // ī  [LATIN SMALL LETTER I WITH MACRON]
+                                               case '\u012D':  // ĭ  [LATIN SMALL LETTER I WITH BREVE]
+                                               case '\u012F':  // į  [LATIN SMALL LETTER I WITH OGONEK]
+                                               case '\u0131':  // ı  [LATIN SMALL LETTER DOTLESS I]
+                                               case '\u01D0':  // ǐ  [LATIN SMALL LETTER I WITH CARON]
+                                               case '\u0209':  // ȉ  [LATIN SMALL LETTER I WITH DOUBLE GRAVE]
+                                               case '\u020B':  // ȋ  [LATIN SMALL LETTER I WITH INVERTED BREVE]
+                                               case '\u0268':  // ɨ  [LATIN SMALL LETTER I WITH STROKE]
+                                               case '\u1D09':  // ᴉ  [LATIN SMALL LETTER TURNED I]
+                                               case '\u1D62':  // ᵢ  [LATIN SUBSCRIPT SMALL LETTER I]
+                                               case '\u1D7C':  // ᵼ  [LATIN SMALL LETTER IOTA WITH STROKE]
+                                               case '\u1D96':  // ᶖ  [LATIN SMALL LETTER I WITH RETROFLEX HOOK]
+                                               case '\u1E2D':  // ḭ  [LATIN SMALL LETTER I WITH TILDE BELOW]
+                                               case '\u1E2F':  // ḯ  [LATIN SMALL LETTER I WITH DIAERESIS AND ACUTE]
+                                               case '\u1EC9':  // ỉ  [LATIN SMALL LETTER I WITH HOOK ABOVE]
+                                               case '\u1ECB':  // ị  [LATIN SMALL LETTER I WITH DOT BELOW]
+                                               case '\u2071':  // ⁱ  [SUPERSCRIPT LATIN SMALL LETTER I]
+                                               case '\u24D8':  // ⓘ  [CIRCLED LATIN SMALL LETTER I]
+                                               case '\uFF49':  // ｉ  [FULLWIDTH LATIN SMALL LETTER I]
+                                                       output[outputPos++] = 'i';
+                                                       break;
+                                               
+                                               case '\u0132':  // Ĳ  [LATIN CAPITAL LIGATURE IJ]
+                                                       output[outputPos++] = 'I';
+                                                       output[outputPos++] = 'J';
+                                                       break;
+                                               
+                                               case '\u24A4':  // ⒤  [PARENTHESIZED LATIN SMALL LETTER I]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'i';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u0133':  // ĳ  [LATIN SMALL LIGATURE IJ]
+                                                       output[outputPos++] = 'i';
+                                                       output[outputPos++] = 'j';
+                                                       break;
+                                               
+                                               case '\u0134':  // Ĵ  [LATIN CAPITAL LETTER J WITH CIRCUMFLEX]
+                                               case '\u0248':  // Ɉ  [LATIN CAPITAL LETTER J WITH STROKE]
+                                               case '\u1D0A':  // ᴊ  [LATIN LETTER SMALL CAPITAL J]
+                                               case '\u24BF':  // Ⓙ  [CIRCLED LATIN CAPITAL LETTER J]
+                                               case '\uFF2A':  // Ｊ  [FULLWIDTH LATIN CAPITAL LETTER J]
+                                                       output[outputPos++] = 'J';
+                                                       break;
+                                               
+                                               case '\u0135':  // ĵ  [LATIN SMALL LETTER J WITH CIRCUMFLEX]
+                                               case '\u01F0':  // ǰ  [LATIN SMALL LETTER J WITH CARON]
+                                               case '\u0237':  // ȷ  [LATIN SMALL LETTER DOTLESS J]
+                                               case '\u0249':  // ɉ  [LATIN SMALL LETTER J WITH STROKE]
+                                               case '\u025F':  // ɟ  [LATIN SMALL LETTER DOTLESS J WITH STROKE]
+                                               case '\u0284':  // ʄ  [LATIN SMALL LETTER DOTLESS J WITH STROKE AND HOOK]
+                                               case '\u029D':  // ʝ  [LATIN SMALL LETTER J WITH CROSSED-TAIL]
+                                               case '\u24D9':  // ⓙ  [CIRCLED LATIN SMALL LETTER J]
+                                               case '\u2C7C':  // ⱼ  [LATIN SUBSCRIPT SMALL LETTER J]
+                                               case '\uFF4A':  // ｊ  [FULLWIDTH LATIN SMALL LETTER J]
+                                                       output[outputPos++] = 'j';
+                                                       break;
+                                               
+                                               case '\u24A5':  // ⒥  [PARENTHESIZED LATIN SMALL LETTER J]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'j';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u0136':  // Ķ  [LATIN CAPITAL LETTER K WITH CEDILLA]
+                                               case '\u0198':  // Ƙ  [LATIN CAPITAL LETTER K WITH HOOK]
+                                               case '\u01E8':  // Ǩ  [LATIN CAPITAL LETTER K WITH CARON]
+                                               case '\u1D0B':  // ᴋ  [LATIN LETTER SMALL CAPITAL K]
+                                               case '\u1E30':  // Ḱ  [LATIN CAPITAL LETTER K WITH ACUTE]
+                                               case '\u1E32':  // Ḳ  [LATIN CAPITAL LETTER K WITH DOT BELOW]
+                                               case '\u1E34':  // Ḵ  [LATIN CAPITAL LETTER K WITH LINE BELOW]
+                                               case '\u24C0':  // Ⓚ  [CIRCLED LATIN CAPITAL LETTER K]
+                                               case '\u2C69':  // Ⱪ  [LATIN CAPITAL LETTER K WITH DESCENDER]
+                                               case '\uA740':  // Ꝁ  [LATIN CAPITAL LETTER K WITH STROKE]
+                                               case '\uA742':  // Ꝃ  [LATIN CAPITAL LETTER K WITH DIAGONAL STROKE]
+                                               case '\uA744':  // Ꝅ  [LATIN CAPITAL LETTER K WITH STROKE AND DIAGONAL STROKE]
+                                               case '\uFF2B':  // Ｋ  [FULLWIDTH LATIN CAPITAL LETTER K]
+                                                       output[outputPos++] = 'K';
+                                                       break;
+                                               
+                                               case '\u0137':  // ķ  [LATIN SMALL LETTER K WITH CEDILLA]
+                                               case '\u0199':  // ƙ  [LATIN SMALL LETTER K WITH HOOK]
+                                               case '\u01E9':  // ǩ  [LATIN SMALL LETTER K WITH CARON]
+                                               case '\u029E':  // ʞ  [LATIN SMALL LETTER TURNED K]
+                                               case '\u1D84':  // ᶄ  [LATIN SMALL LETTER K WITH PALATAL HOOK]
+                                               case '\u1E31':  // ḱ  [LATIN SMALL LETTER K WITH ACUTE]
+                                               case '\u1E33':  // ḳ  [LATIN SMALL LETTER K WITH DOT BELOW]
+                                               case '\u1E35':  // ḵ  [LATIN SMALL LETTER K WITH LINE BELOW]
+                                               case '\u24DA':  // ⓚ  [CIRCLED LATIN SMALL LETTER K]
+                                               case '\u2C6A':  // ⱪ  [LATIN SMALL LETTER K WITH DESCENDER]
+                                               case '\uA741':  // ꝁ  [LATIN SMALL LETTER K WITH STROKE]
+                                               case '\uA743':  // ꝃ  [LATIN SMALL LETTER K WITH DIAGONAL STROKE]
+                                               case '\uA745':  // ꝅ  [LATIN SMALL LETTER K WITH STROKE AND DIAGONAL STROKE]
+                                               case '\uFF4B':  // ｋ  [FULLWIDTH LATIN SMALL LETTER K]
+                                                       output[outputPos++] = 'k';
+                                                       break;
+                                               
+                                               case '\u24A6':  // ⒦  [PARENTHESIZED LATIN SMALL LETTER K]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'k';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u0139':  // Ĺ  [LATIN CAPITAL LETTER L WITH ACUTE]
+                                               case '\u013B':  // Ļ  [LATIN CAPITAL LETTER L WITH CEDILLA]
+                                               case '\u013D':  // Ľ  [LATIN CAPITAL LETTER L WITH CARON]
+                                               case '\u013F':  // Ŀ  [LATIN CAPITAL LETTER L WITH MIDDLE DOT]
+                                               case '\u0141':  // Ł  [LATIN CAPITAL LETTER L WITH STROKE]
+                                               case '\u023D':  // Ƚ  [LATIN CAPITAL LETTER L WITH BAR]
+                                               case '\u029F':  // ʟ  [LATIN LETTER SMALL CAPITAL L]
+                                               case '\u1D0C':  // ᴌ  [LATIN LETTER SMALL CAPITAL L WITH STROKE]
+                                               case '\u1E36':  // Ḷ  [LATIN CAPITAL LETTER L WITH DOT BELOW]
+                                               case '\u1E38':  // Ḹ  [LATIN CAPITAL LETTER L WITH DOT BELOW AND MACRON]
+                                               case '\u1E3A':  // Ḻ  [LATIN CAPITAL LETTER L WITH LINE BELOW]
+                                               case '\u1E3C':  // Ḽ  [LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW]
+                                               case '\u24C1':  // Ⓛ  [CIRCLED LATIN CAPITAL LETTER L]
+                                               case '\u2C60':  // Ⱡ  [LATIN CAPITAL LETTER L WITH DOUBLE BAR]
+                                               case '\u2C62':  // Ɫ  [LATIN CAPITAL LETTER L WITH MIDDLE TILDE]
+                                               case '\uA746':  // Ꝇ  [LATIN CAPITAL LETTER BROKEN L]
+                                               case '\uA748':  // Ꝉ  [LATIN CAPITAL LETTER L WITH HIGH STROKE]
+                                               case '\uA780':  // Ꞁ  [LATIN CAPITAL LETTER TURNED L]
+                                               case '\uFF2C':  // Ｌ  [FULLWIDTH LATIN CAPITAL LETTER L]
+                                                       output[outputPos++] = 'L';
+                                                       break;
+                                               
+                                               case '\u013A':  // ĺ  [LATIN SMALL LETTER L WITH ACUTE]
+                                               case '\u013C':  // ļ  [LATIN SMALL LETTER L WITH CEDILLA]
+                                               case '\u013E':  // ľ  [LATIN SMALL LETTER L WITH CARON]
+                                               case '\u0140':  // ŀ  [LATIN SMALL LETTER L WITH MIDDLE DOT]
+                                               case '\u0142':  // ł  [LATIN SMALL LETTER L WITH STROKE]
+                                               case '\u019A':  // ƚ  [LATIN SMALL LETTER L WITH BAR]
+                                               case '\u0234':  // ȴ  [LATIN SMALL LETTER L WITH CURL]
+                                               case '\u026B':  // ɫ  [LATIN SMALL LETTER L WITH MIDDLE TILDE]
+                                               case '\u026C':  // ɬ  [LATIN SMALL LETTER L WITH BELT]
+                                               case '\u026D':  // ɭ  [LATIN SMALL LETTER L WITH RETROFLEX HOOK]
+                                               case '\u1D85':  // ᶅ  [LATIN SMALL LETTER L WITH PALATAL HOOK]
+                                               case '\u1E37':  // ḷ  [LATIN SMALL LETTER L WITH DOT BELOW]
+                                               case '\u1E39':  // ḹ  [LATIN SMALL LETTER L WITH DOT BELOW AND MACRON]
+                                               case '\u1E3B':  // ḻ  [LATIN SMALL LETTER L WITH LINE BELOW]
+                                               case '\u1E3D':  // ḽ  [LATIN SMALL LETTER L WITH CIRCUMFLEX BELOW]
+                                               case '\u24DB':  // ⓛ  [CIRCLED LATIN SMALL LETTER L]
+                                               case '\u2C61':  // ⱡ  [LATIN SMALL LETTER L WITH DOUBLE BAR]
+                                               case '\uA747':  // ꝇ  [LATIN SMALL LETTER BROKEN L]
+                                               case '\uA749':  // ꝉ  [LATIN SMALL LETTER L WITH HIGH STROKE]
+                                               case '\uA781':  // ꞁ  [LATIN SMALL LETTER TURNED L]
+                                               case '\uFF4C':  // ｌ  [FULLWIDTH LATIN SMALL LETTER L]
+                                                       output[outputPos++] = 'l';
+                                                       break;
+                                               
+                                               case '\u01C7':  // Ǉ  [LATIN CAPITAL LETTER LJ]
+                                                       output[outputPos++] = 'L';
+                                                       output[outputPos++] = 'J';
+                                                       break;
+                                               
+                                               case '\u1EFA':  // Ỻ  [LATIN CAPITAL LETTER MIDDLE-WELSH LL]
+                                                       output[outputPos++] = 'L';
+                                                       output[outputPos++] = 'L';
+                                                       break;
+                                               
+                                               case '\u01C8':  // ǈ  [LATIN CAPITAL LETTER L WITH SMALL LETTER J]
+                                                       output[outputPos++] = 'L';
+                                                       output[outputPos++] = 'j';
+                                                       break;
+                                               
+                                               case '\u24A7':  // ⒧  [PARENTHESIZED LATIN SMALL LETTER L]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'l';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u01C9':  // ǉ  [LATIN SMALL LETTER LJ]
+                                                       output[outputPos++] = 'l';
+                                                       output[outputPos++] = 'j';
+                                                       break;
+                                               
+                                               case '\u1EFB':  // ỻ  [LATIN SMALL LETTER MIDDLE-WELSH LL]
+                                                       output[outputPos++] = 'l';
+                                                       output[outputPos++] = 'l';
+                                                       break;
+                                               
+                                               case '\u02AA':  // ʪ  [LATIN SMALL LETTER LS DIGRAPH]
+                                                       output[outputPos++] = 'l';
+                                                       output[outputPos++] = 's';
+                                                       break;
+                                               
+                                               case '\u02AB':  // ʫ  [LATIN SMALL LETTER LZ DIGRAPH]
+                                                       output[outputPos++] = 'l';
+                                                       output[outputPos++] = 'z';
+                                                       break;
+                                               
+                                               case '\u019C':  // Ɯ  [LATIN CAPITAL LETTER TURNED M]
+                                               case '\u1D0D':  // ᴍ  [LATIN LETTER SMALL CAPITAL M]
+                                               case '\u1E3E':  // Ḿ  [LATIN CAPITAL LETTER M WITH ACUTE]
+                                               case '\u1E40':  // Ṁ  [LATIN CAPITAL LETTER M WITH DOT ABOVE]
+                                               case '\u1E42':  // Ṃ  [LATIN CAPITAL LETTER M WITH DOT BELOW]
+                                               case '\u24C2':  // Ⓜ  [CIRCLED LATIN CAPITAL LETTER M]
+                                               case '\u2C6E':  // Ɱ  [LATIN CAPITAL LETTER M WITH HOOK]
+                                               case '\uA7FD':  // ꟽ  [LATIN EPIGRAPHIC LETTER INVERTED M]
+                                               case '\uA7FF':  // ꟿ  [LATIN EPIGRAPHIC LETTER ARCHAIC M]
+                                               case '\uFF2D':  // Ｍ  [FULLWIDTH LATIN CAPITAL LETTER M]
+                                                       output[outputPos++] = 'M';
+                                                       break;
+                                               
+                                               case '\u026F':  // ɯ  [LATIN SMALL LETTER TURNED M]
+                                               case '\u0270':  // ɰ  [LATIN SMALL LETTER TURNED M WITH LONG LEG]
+                                               case '\u0271':  // ɱ  [LATIN SMALL LETTER M WITH HOOK]
+                                               case '\u1D6F':  // ᵯ  [LATIN SMALL LETTER M WITH MIDDLE TILDE]
+                                               case '\u1D86':  // ᶆ  [LATIN SMALL LETTER M WITH PALATAL HOOK]
+                                               case '\u1E3F':  // ḿ  [LATIN SMALL LETTER M WITH ACUTE]
+                                               case '\u1E41':  // ṁ  [LATIN SMALL LETTER M WITH DOT ABOVE]
+                                               case '\u1E43':  // ṃ  [LATIN SMALL LETTER M WITH DOT BELOW]
+                                               case '\u24DC':  // ⓜ  [CIRCLED LATIN SMALL LETTER M]
+                                               case '\uFF4D':  // ｍ  [FULLWIDTH LATIN SMALL LETTER M]
+                                                       output[outputPos++] = 'm';
+                                                       break;
+                                               
+                                               case '\u24A8':  // ⒨  [PARENTHESIZED LATIN SMALL LETTER M]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'm';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u00D1':  // Ñ  [LATIN CAPITAL LETTER N WITH TILDE]
+                                               case '\u0143':  // Ń  [LATIN CAPITAL LETTER N WITH ACUTE]
+                                               case '\u0145':  // Ņ  [LATIN CAPITAL LETTER N WITH CEDILLA]
+                                               case '\u0147':  // Ň  [LATIN CAPITAL LETTER N WITH CARON]
+                                               case '\u014A':  // Ŋ  http://en.wikipedia.org/wiki/Eng_(letter)  [LATIN CAPITAL LETTER ENG]
+                                               case '\u019D':  // Ɲ  [LATIN CAPITAL LETTER N WITH LEFT HOOK]
+                                               case '\u01F8':  // Ǹ  [LATIN CAPITAL LETTER N WITH GRAVE]
+                                               case '\u0220':  // Ƞ  [LATIN CAPITAL LETTER N WITH LONG RIGHT LEG]
+                                               case '\u0274':  // ɴ  [LATIN LETTER SMALL CAPITAL N]
+                                               case '\u1D0E':  // ᴎ  [LATIN LETTER SMALL CAPITAL REVERSED N]
+                                               case '\u1E44':  // Ṅ  [LATIN CAPITAL LETTER N WITH DOT ABOVE]
+                                               case '\u1E46':  // Ṇ  [LATIN CAPITAL LETTER N WITH DOT BELOW]
+                                               case '\u1E48':  // Ṉ  [LATIN CAPITAL LETTER N WITH LINE BELOW]
+                                               case '\u1E4A':  // Ṋ  [LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW]
+                                               case '\u24C3':  // Ⓝ  [CIRCLED LATIN CAPITAL LETTER N]
+                                               case '\uFF2E':  // Ｎ  [FULLWIDTH LATIN CAPITAL LETTER N]
+                                                       output[outputPos++] = 'N';
+                                                       break;
+                                               
+                                               case '\u00F1':  // ñ  [LATIN SMALL LETTER N WITH TILDE]
+                                               case '\u0144':  // ń  [LATIN SMALL LETTER N WITH ACUTE]
+                                               case '\u0146':  // ņ  [LATIN SMALL LETTER N WITH CEDILLA]
+                                               case '\u0148':  // ň  [LATIN SMALL LETTER N WITH CARON]
+                                               case '\u0149':  // ŉ  [LATIN SMALL LETTER N PRECEDED BY APOSTROPHE]
+                                               case '\u014B':  // ŋ  http://en.wikipedia.org/wiki/Eng_(letter)  [LATIN SMALL LETTER ENG]
+                                               case '\u019E':  // ƞ  [LATIN SMALL LETTER N WITH LONG RIGHT LEG]
+                                               case '\u01F9':  // ǹ  [LATIN SMALL LETTER N WITH GRAVE]
+                                               case '\u0235':  // ȵ  [LATIN SMALL LETTER N WITH CURL]
+                                               case '\u0272':  // ɲ  [LATIN SMALL LETTER N WITH LEFT HOOK]
+                                               case '\u0273':  // ɳ  [LATIN SMALL LETTER N WITH RETROFLEX HOOK]
+                                               case '\u1D70':  // ᵰ  [LATIN SMALL LETTER N WITH MIDDLE TILDE]
+                                               case '\u1D87':  // ᶇ  [LATIN SMALL LETTER N WITH PALATAL HOOK]
+                                               case '\u1E45':  // ṅ  [LATIN SMALL LETTER N WITH DOT ABOVE]
+                                               case '\u1E47':  // ṇ  [LATIN SMALL LETTER N WITH DOT BELOW]
+                                               case '\u1E49':  // ṉ  [LATIN SMALL LETTER N WITH LINE BELOW]
+                                               case '\u1E4B':  // ṋ  [LATIN SMALL LETTER N WITH CIRCUMFLEX BELOW]
+                                               case '\u207F':  // ⁿ  [SUPERSCRIPT LATIN SMALL LETTER N]
+                                               case '\u24DD':  // ⓝ  [CIRCLED LATIN SMALL LETTER N]
+                                               case '\uFF4E':  // ｎ  [FULLWIDTH LATIN SMALL LETTER N]
+                                                       output[outputPos++] = 'n';
+                                                       break;
+                                               
+                                               case '\u01CA':  // Ǌ  [LATIN CAPITAL LETTER NJ]
+                                                       output[outputPos++] = 'N';
+                                                       output[outputPos++] = 'J';
+                                                       break;
+                                               
+                                               case '\u01CB':  // ǋ  [LATIN CAPITAL LETTER N WITH SMALL LETTER J]
+                                                       output[outputPos++] = 'N';
+                                                       output[outputPos++] = 'j';
+                                                       break;
+                                               
+                                               case '\u24A9':  // ⒩  [PARENTHESIZED LATIN SMALL LETTER N]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'n';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u01CC':  // ǌ  [LATIN SMALL LETTER NJ]
+                                                       output[outputPos++] = 'n';
+                                                       output[outputPos++] = 'j';
+                                                       break;
+                                               
+                                               case '\u00D2':  // Ò  [LATIN CAPITAL LETTER O WITH GRAVE]
+                                               case '\u00D3':  // Ó  [LATIN CAPITAL LETTER O WITH ACUTE]
+                                               case '\u00D4':  // Ô  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX]
+                                               case '\u00D5':  // Õ  [LATIN CAPITAL LETTER O WITH TILDE]
+                                               case '\u00D6':  // Ö  [LATIN CAPITAL LETTER O WITH DIAERESIS]
+                                               case '\u00D8':  // Ø  [LATIN CAPITAL LETTER O WITH STROKE]
+                                               case '\u014C':  // Ō  [LATIN CAPITAL LETTER O WITH MACRON]
+                                               case '\u014E':  // Ŏ  [LATIN CAPITAL LETTER O WITH BREVE]
+                                               case '\u0150':  // Ő  [LATIN CAPITAL LETTER O WITH DOUBLE ACUTE]
+                                               case '\u0186':  // Ɔ  [LATIN CAPITAL LETTER OPEN O]
+                                               case '\u019F':  // Ɵ  [LATIN CAPITAL LETTER O WITH MIDDLE TILDE]
+                                               case '\u01A0':  // Ơ  [LATIN CAPITAL LETTER O WITH HORN]
+                                               case '\u01D1':  // Ǒ  [LATIN CAPITAL LETTER O WITH CARON]
+                                               case '\u01EA':  // Ǫ  [LATIN CAPITAL LETTER O WITH OGONEK]
+                                               case '\u01EC':  // Ǭ  [LATIN CAPITAL LETTER O WITH OGONEK AND MACRON]
+                                               case '\u01FE':  // Ǿ  [LATIN CAPITAL LETTER O WITH STROKE AND ACUTE]
+                                               case '\u020C':  // Ȍ  [LATIN CAPITAL LETTER O WITH DOUBLE GRAVE]
+                                               case '\u020E':  // Ȏ  [LATIN CAPITAL LETTER O WITH INVERTED BREVE]
+                                               case '\u022A':  // Ȫ  [LATIN CAPITAL LETTER O WITH DIAERESIS AND MACRON]
+                                               case '\u022C':  // Ȭ  [LATIN CAPITAL LETTER O WITH TILDE AND MACRON]
+                                               case '\u022E':  // Ȯ  [LATIN CAPITAL LETTER O WITH DOT ABOVE]
+                                               case '\u0230':  // Ȱ  [LATIN CAPITAL LETTER O WITH DOT ABOVE AND MACRON]
+                                               case '\u1D0F':  // ᴏ  [LATIN LETTER SMALL CAPITAL O]
+                                               case '\u1D10':  // ᴐ  [LATIN LETTER SMALL CAPITAL OPEN O]
+                                               case '\u1E4C':  // Ṍ  [LATIN CAPITAL LETTER O WITH TILDE AND ACUTE]
+                                               case '\u1E4E':  // Ṏ  [LATIN CAPITAL LETTER O WITH TILDE AND DIAERESIS]
+                                               case '\u1E50':  // Ṑ  [LATIN CAPITAL LETTER O WITH MACRON AND GRAVE]
+                                               case '\u1E52':  // Ṓ  [LATIN CAPITAL LETTER O WITH MACRON AND ACUTE]
+                                               case '\u1ECC':  // Ọ  [LATIN CAPITAL LETTER O WITH DOT BELOW]
+                                               case '\u1ECE':  // Ỏ  [LATIN CAPITAL LETTER O WITH HOOK ABOVE]
+                                               case '\u1ED0':  // Ố  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND ACUTE]
+                                               case '\u1ED2':  // Ồ  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND GRAVE]
+                                               case '\u1ED4':  // Ổ  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE]
+                                               case '\u1ED6':  // Ỗ  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND TILDE]
+                                               case '\u1ED8':  // Ộ  [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND DOT BELOW]
+                                               case '\u1EDA':  // Ớ  [LATIN CAPITAL LETTER O WITH HORN AND ACUTE]
+                                               case '\u1EDC':  // Ờ  [LATIN CAPITAL LETTER O WITH HORN AND GRAVE]
+                                               case '\u1EDE':  // Ở  [LATIN CAPITAL LETTER O WITH HORN AND HOOK ABOVE]
+                                               case '\u1EE0':  // Ỡ  [LATIN CAPITAL LETTER O WITH HORN AND TILDE]
+                                               case '\u1EE2':  // Ợ  [LATIN CAPITAL LETTER O WITH HORN AND DOT BELOW]
+                                               case '\u24C4':  // Ⓞ  [CIRCLED LATIN CAPITAL LETTER O]
+                                               case '\uA74A':  // Ꝋ  [LATIN CAPITAL LETTER O WITH LONG STROKE OVERLAY]
+                                               case '\uA74C':  // Ꝍ  [LATIN CAPITAL LETTER O WITH LOOP]
+                                               case '\uFF2F':  // Ｏ  [FULLWIDTH LATIN CAPITAL LETTER O]
+                                                       output[outputPos++] = 'O';
+                                                       break;
+                                               
+                                               case '\u00F2':  // ò  [LATIN SMALL LETTER O WITH GRAVE]
+                                               case '\u00F3':  // ó  [LATIN SMALL LETTER O WITH ACUTE]
+                                               case '\u00F4':  // ô  [LATIN SMALL LETTER O WITH CIRCUMFLEX]
+                                               case '\u00F5':  // õ  [LATIN SMALL LETTER O WITH TILDE]
+                                               case '\u00F6':  // ö  [LATIN SMALL LETTER O WITH DIAERESIS]
+                                               case '\u00F8':  // ø  [LATIN SMALL LETTER O WITH STROKE]
+                                               case '\u014D':  // ō  [LATIN SMALL LETTER O WITH MACRON]
+                                               case '\u014F':  // ŏ  [LATIN SMALL LETTER O WITH BREVE]
+                                               case '\u0151':  // ő  [LATIN SMALL LETTER O WITH DOUBLE ACUTE]
+                                               case '\u01A1':  // ơ  [LATIN SMALL LETTER O WITH HORN]
+                                               case '\u01D2':  // ǒ  [LATIN SMALL LETTER O WITH CARON]
+                                               case '\u01EB':  // ǫ  [LATIN SMALL LETTER O WITH OGONEK]
+                                               case '\u01ED':  // ǭ  [LATIN SMALL LETTER O WITH OGONEK AND MACRON]
+                                               case '\u01FF':  // ǿ  [LATIN SMALL LETTER O WITH STROKE AND ACUTE]
+                                               case '\u020D':  // ȍ  [LATIN SMALL LETTER O WITH DOUBLE GRAVE]
+                                               case '\u020F':  // ȏ  [LATIN SMALL LETTER O WITH INVERTED BREVE]
+                                               case '\u022B':  // ȫ  [LATIN SMALL LETTER O WITH DIAERESIS AND MACRON]
+                                               case '\u022D':  // ȭ  [LATIN SMALL LETTER O WITH TILDE AND MACRON]
+                                               case '\u022F':  // ȯ  [LATIN SMALL LETTER O WITH DOT ABOVE]
+                                               case '\u0231':  // ȱ  [LATIN SMALL LETTER O WITH DOT ABOVE AND MACRON]
+                                               case '\u0254':  // ɔ  [LATIN SMALL LETTER OPEN O]
+                                               case '\u0275':  // ɵ  [LATIN SMALL LETTER BARRED O]
+                                               case '\u1D16':  // ᴖ  [LATIN SMALL LETTER TOP HALF O]
+                                               case '\u1D17':  // ᴗ  [LATIN SMALL LETTER BOTTOM HALF O]
+                                               case '\u1D97':  // ᶗ  [LATIN SMALL LETTER OPEN O WITH RETROFLEX HOOK]
+                                               case '\u1E4D':  // ṍ  [LATIN SMALL LETTER O WITH TILDE AND ACUTE]
+                                               case '\u1E4F':  // ṏ  [LATIN SMALL LETTER O WITH TILDE AND DIAERESIS]
+                                               case '\u1E51':  // ṑ  [LATIN SMALL LETTER O WITH MACRON AND GRAVE]
+                                               case '\u1E53':  // ṓ  [LATIN SMALL LETTER O WITH MACRON AND ACUTE]
+                                               case '\u1ECD':  // ọ  [LATIN SMALL LETTER O WITH DOT BELOW]
+                                               case '\u1ECF':  // ỏ  [LATIN SMALL LETTER O WITH HOOK ABOVE]
+                                               case '\u1ED1':  // ố  [LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACUTE]
+                                               case '\u1ED3':  // ồ  [LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRAVE]
+                                               case '\u1ED5':  // ổ  [LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE]
+                                               case '\u1ED7':  // ỗ  [LATIN SMALL LETTER O WITH CIRCUMFLEX AND TILDE]
+                                               case '\u1ED9':  // ộ  [LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT BELOW]
+                                               case '\u1EDB':  // ớ  [LATIN SMALL LETTER O WITH HORN AND ACUTE]
+                                               case '\u1EDD':  // ờ  [LATIN SMALL LETTER O WITH HORN AND GRAVE]
+                                               case '\u1EDF':  // ở  [LATIN SMALL LETTER O WITH HORN AND HOOK ABOVE]
+                                               case '\u1EE1':  // ỡ  [LATIN SMALL LETTER O WITH HORN AND TILDE]
+                                               case '\u1EE3':  // ợ  [LATIN SMALL LETTER O WITH HORN AND DOT BELOW]
+                                               case '\u2092':  // ₒ  [LATIN SUBSCRIPT SMALL LETTER O]
+                                               case '\u24DE':  // ⓞ  [CIRCLED LATIN SMALL LETTER O]
+                                               case '\u2C7A': 
+                                               // รƒยขร‚ยฑร‚ยบ  [LATIN SMALL LETTER O WITH LOW RING INSIDE]
+                                               case '\uA74B': 
+                                               // รƒยชรฏยฟยฝรขโ‚ฌยน  [LATIN SMALL LETTER O WITH LONG STROKE OVERLAY]
+                                               case '\uA74D': 
+                                               // รƒยชรฏยฟยฝรฏยฟยฝ  [LATIN SMALL LETTER O WITH LOOP]
+                                               case '\uFF4F':  // รƒยฏร‚ยฝรฏยฟยฝ  [FULLWIDTH LATIN SMALL LETTER O]
+                                                       output[outputPos++] = 'o';
+                                                       break;
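+                                               // Each run of empty case labels above falls through to the single
+                                               // handler that follows it; C# permits this because only the final
+                                               // label in the run carries a statement body.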
+                                               
+                                               case '\u0152':
+                                               // Œ  [LATIN CAPITAL LIGATURE OE]
+                                               case '\u0276':  // ɶ  [LATIN LETTER SMALL CAPITAL OE]
+                                                       output[outputPos++] = 'O';
+                                                       output[outputPos++] = 'E';
+                                                       break;
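+                                               // Multi-char foldings such as "OE" emit more output chars than the
+                                               // single input char they consume; this assumes the enclosing fold
+                                               // method over-allocates output (4x the input length in stock
+                                               // Lucene), so these extra writes stay in bounds.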
+                                               
+                                               case '\uA74E':  // รƒยชรฏยฟยฝร…ยฝ  [LATIN CAPITAL LETTER OO]
+                                                       output[outputPos++] = 'O';
+                                                       output[outputPos++] = 'O';
+                                                       break;
+                                               
+                                               case '\u0222': 
+                                               // รƒห†ร‚ยข  http://en.wikipedia.org/wiki/OU  [LATIN CAPITAL LETTER OU]
+                                               case '\u1D15':  // รƒยกร‚ยดรขโ‚ฌยข  [LATIN LETTER SMALL CAPITAL OU]
+                                                       output[outputPos++] = 'O';
+                                                       output[outputPos++] = 'U';
+                                                       break;
+                                               
+                                               case '\u24AA':  // รƒยขรขโ‚ฌโ„ขร‚ยช  [PARENTHESIZED LATIN SMALL LETTER O]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'o';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u0153': 
+                                               // รƒโ€ฆรขโ‚ฌล“  [LATIN SMALL LIGATURE OE]
+                                               case '\u1D14':  // รƒยกร‚ยดรฏยฟยฝ?  [LATIN SMALL LETTER TURNED OE]
+                                                       output[outputPos++] = 'o';
+                                                       output[outputPos++] = 'e';
+                                                       break;
+                                               
+                                               case '\uA74F':  // รƒยชรฏยฟยฝรฏยฟยฝ  [LATIN SMALL LETTER OO]
+                                                       output[outputPos++] = 'o';
+                                                       output[outputPos++] = 'o';
+                                                       break;
+                                               
+                                               case '\u0223':  // รƒห†ร‚ยฃ  http://en.wikipedia.org/wiki/OU  [LATIN SMALL LETTER OU]
+                                                       output[outputPos++] = 'o';
+                                                       output[outputPos++] = 'u';
+                                                       break;
+                                               
+                                               case '\u01A4': 
+                                               // รƒโ€ ร‚ยค  [LATIN CAPITAL LETTER P WITH HOOK]
+                                               case '\u1D18': 
+                                               // รƒยกร‚ยดร‹ล“  [LATIN LETTER SMALL CAPITAL P]
+                                               case '\u1E54': 
+                                               // รƒยกร‚ยนรฏยฟยฝ?  [LATIN CAPITAL LETTER P WITH ACUTE]
+                                               case '\u1E56': 
+                                               // รƒยกร‚ยนรขโ‚ฌโ€œ  [LATIN CAPITAL LETTER P WITH DOT ABOVE]
+                                               case '\u24C5': 
+                                               // รƒยขรขโ‚ฌล“รขโ‚ฌยฆ  [CIRCLED LATIN CAPITAL LETTER P]
+                                               case '\u2C63': 
+                                               // รƒยขร‚ยฑร‚ยฃ  [LATIN CAPITAL LETTER P WITH STROKE]
+                                               case '\uA750': 
+                                               // รƒยชรฏยฟยฝรฏยฟยฝ  [LATIN CAPITAL LETTER P WITH STROKE THROUGH DESCENDER]
+                                               case '\uA752': 
+                                               // รƒยชรฏยฟยฝรขโ‚ฌโ„ข  [LATIN CAPITAL LETTER P WITH FLOURISH]
+                                               case '\uA754': 
+                                               // รƒยชรฏยฟยฝรฏยฟยฝ?  [LATIN CAPITAL LETTER P WITH SQUIRREL TAIL]
+                                               case '\uFF30':  // รƒยฏร‚ยผร‚ยฐ  [FULLWIDTH LATIN CAPITAL LETTER P]
+                                                       output[outputPos++] = 'P';
+                                                       break;
+                                               
+                                               case '\u01A5': 
+                                               // รƒโ€ ร‚ยฅ  [LATIN SMALL LETTER P WITH HOOK]
+                                               case '\u1D71': 
+                                               // รƒยกร‚ยตร‚ยฑ  [LATIN SMALL LETTER P WITH MIDDLE TILDE]
+                                               case '\u1D7D': 
+                                               // รƒยกร‚ยตร‚ยฝ  [LATIN SMALL LETTER P WITH STROKE]
+                                               case '\u1D88': 
+                                               // รƒยกร‚ยถร‹โ€   [LATIN SMALL LETTER P WITH PALATAL HOOK]
+                                               case '\u1E55': 
+                                               // รƒยกร‚ยนรขโ‚ฌยข  [LATIN SMALL LETTER P WITH ACUTE]
+                                               case '\u1E57': 
+                                               // รƒยกร‚ยนรขโ‚ฌโ€  [LATIN SMALL LETTER P WITH DOT ABOVE]
+                                               case '\u24DF': 
+                                               // รƒยขรขโ‚ฌล“ร…ยธ  [CIRCLED LATIN SMALL LETTER P]
+                                               case '\uA751': 
+                                               // รƒยชรฏยฟยฝรขโ‚ฌหœ  [LATIN SMALL LETTER P WITH STROKE THROUGH DESCENDER]
+                                               case '\uA753': 
+                                               // รƒยชรฏยฟยฝรขโ‚ฌล“  [LATIN SMALL LETTER P WITH FLOURISH]
+                                               case '\uA755': 
+                                               // รƒยชรฏยฟยฝรขโ‚ฌยข  [LATIN SMALL LETTER P WITH SQUIRREL TAIL]
+                                               case '\uA7FC': 
+                                               // รƒยชร…ยธร‚ยผ  [LATIN EPIGRAPHIC LETTER REVERSED P]
+                                               case '\uFF50':  // รƒยฏร‚ยฝรฏยฟยฝ  [FULLWIDTH LATIN SMALL LETTER P]
+                                                       output[outputPos++] = 'p';
+                                                       break;
+                                               
+                                               case '\u24AB':  // รƒยขรขโ‚ฌโ„ขร‚ยซ  [PARENTHESIZED LATIN SMALL LETTER P]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'p';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u024A': 
+                                               // รƒโ€ฐร…ย   [LATIN CAPITAL LETTER SMALL Q WITH HOOK TAIL]
+                                               case '\u24C6': 
+                                               // รƒยขรขโ‚ฌล“รขโ‚ฌย   [CIRCLED LATIN CAPITAL LETTER Q]
+                                               case '\uA756': 
+                                               // รƒยชรฏยฟยฝรขโ‚ฌโ€œ  [LATIN CAPITAL LETTER Q WITH STROKE THROUGH DESCENDER]
+                                               case '\uA758': 
+                                               // รƒยชรฏยฟยฝร‹ล“  [LATIN CAPITAL LETTER Q WITH DIAGONAL STROKE]
+                                               case '\uFF31':  // รƒยฏร‚ยผร‚ยฑ  [FULLWIDTH LATIN CAPITAL LETTER Q]
+                                                       output[outputPos++] = 'Q';
+                                                       break;
+                                               
+                                               case '\u0138': 
+                                               // รƒโ€žร‚ยธ  http://en.wikipedia.org/wiki/Kra_(letter)  [LATIN SMALL LETTER KRA]
+                                               case '\u024B': 
+                                               // รƒโ€ฐรขโ‚ฌยน  [LATIN SMALL LETTER Q WITH HOOK TAIL]
+                                               case '\u02A0': 
+                                               // รƒล ร‚ย   [LATIN SMALL LETTER Q WITH HOOK]
+                                               case '\u24E0': 
+                                               // รƒยขรขโ‚ฌล“ร‚ย   [CIRCLED LATIN SMALL LETTER Q]
+                                               case '\uA757': 
+                                               // รƒยชรฏยฟยฝรขโ‚ฌโ€  [LATIN SMALL LETTER Q WITH STROKE THROUGH DESCENDER]
+                                               case '\uA759': 
+                                               // รƒยชรฏยฟยฝรขโ€žยข  [LATIN SMALL LETTER Q WITH DIAGONAL STROKE]
+                                               case '\uFF51':  // รƒยฏร‚ยฝรขโ‚ฌหœ  [FULLWIDTH LATIN SMALL LETTER Q]
+                                                       output[outputPos++] = 'q';
+                                                       break;
+                                               
+                                               case '\u24AC':  // รƒยขรขโ‚ฌโ„ขร‚ยฌ  [PARENTHESIZED LATIN SMALL LETTER Q]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'q';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u0239':  // รƒห†ร‚ยน  [LATIN SMALL LETTER QP DIGRAPH]
+                                                       output[outputPos++] = 'q';
+                                                       output[outputPos++] = 'p';
+                                                       break;
+                                               
+                                               case '\u0154': 
+                                               // รƒโ€ฆรฏยฟยฝ?  [LATIN CAPITAL LETTER R WITH ACUTE]
+                                               case '\u0156': 
+                                               // รƒโ€ฆรขโ‚ฌโ€œ  [LATIN CAPITAL LETTER R WITH CEDILLA]
+                                               case '\u0158': 
+                                               // รƒโ€ฆร‹ล“  [LATIN CAPITAL LETTER R WITH CARON]
+                                               case '\u0210': 
+                                               // รƒห†รขโ‚ฌโ„ข  [LATIN CAPITAL LETTER R WITH DOUBLE GRAVE]
+                                               case '\u0212': 
+                                               // รƒห†รขโ‚ฌโ„ข  [LATIN CAPITAL LETTER R WITH INVERTED BREVE]
+                                               case '\u024C': 
+                                               // รƒโ€ฐร…โ€™  [LATIN CAPITAL LETTER R WITH STROKE]
+                                               case '\u0280': 
+                                               // รƒล รขโ€šยฌ  [LATIN LETTER SMALL CAPITAL R]
+                                               case '\u0281': 
+                                               // รƒล รฏยฟยฝ  [LATIN LETTER SMALL CAPITAL INVERTED R]
+                                               case '\u1D19': 
+                                               // รƒยกร‚ยดรขโ€žยข  [LATIN LETTER SMALL CAPITAL REVERSED R]
+                                               case '\u1D1A': 
+                                               // รƒยกร‚ยดร…ยก  [LATIN LETTER SMALL CAPITAL TURNED R]
+                                               case '\u1E58': 
+                                               // รƒยกร‚ยนร‹ล“  [LATIN CAPITAL LETTER R WITH DOT ABOVE]
+                                               case '\u1E5A': 
+                                               // รƒยกร‚ยนร…ยก  [LATIN CAPITAL LETTER R WITH DOT BELOW]
+                                               case '\u1E5C': 
+                                               // รƒยกร‚ยนร…โ€œ  [LATIN CAPITAL LETTER R WITH DOT BELOW AND MACRON]
+                                               case '\u1E5E': 
+                                               // รƒยกร‚ยนร…ยพ  [LATIN CAPITAL LETTER R WITH LINE BELOW]
+                                               case '\u24C7': 
+                                               // รƒยขรขโ‚ฌล“รขโ‚ฌยก  [CIRCLED LATIN CAPITAL LETTER R]
+                                               case '\u2C64': 
+                                               // รƒยขร‚ยฑร‚ยค  [LATIN CAPITAL LETTER R WITH TAIL]
+                                               case '\uA75A': 
+                                               // รƒยชรฏยฟยฝร…ยก  [LATIN CAPITAL LETTER R ROTUNDA]
+                                               case '\uA782': 
+                                               // รƒยชร…ยพรขโ‚ฌลก  [LATIN CAPITAL LETTER INSULAR R]
+                                               case '\uFF32':  // รƒยฏร‚ยผร‚ยฒ  [FULLWIDTH LATIN CAPITAL LETTER R]
+                                                       output[outputPos++] = 'R';
+                                                       break;
+                                               
+                                               case '\u0155': 
+                                               // รƒโ€ฆรขโ‚ฌยข  [LATIN SMALL LETTER R WITH ACUTE]
+                                               case '\u0157': 
+                                               // รƒโ€ฆรขโ‚ฌโ€  [LATIN SMALL LETTER R WITH CEDILLA]
+                                               case '\u0159': 
+                                               // รƒโ€ฆรขโ€žยข  [LATIN SMALL LETTER R WITH CARON]
+                                               case '\u0211': 
+                                               // รƒห†รขโ‚ฌหœ  [LATIN SMALL LETTER R WITH DOUBLE GRAVE]
+                                               case '\u0213': 
+                                               // รƒห†รขโ‚ฌล“  [LATIN SMALL LETTER R WITH INVERTED BREVE]
+                                               case '\u024D': 
+                                               // รƒโ€ฐรฏยฟยฝ  [LATIN SMALL LETTER R WITH STROKE]
+                                               case '\u027C': 
+                                               // รƒโ€ฐร‚ยผ  [LATIN SMALL LETTER R WITH LONG LEG]
+                                               case '\u027D': 
+                                               // รƒโ€ฐร‚ยฝ  [LATIN SMALL LETTER R WITH TAIL]
+                                               case '\u027E': 
+                                               // รƒโ€ฐร‚ยพ  [LATIN SMALL LETTER R WITH FISHHOOK]
+                                               case '\u027F': 
+                                               // รƒโ€ฐร‚ยฟ  [LATIN SMALL LETTER REVERSED R WITH FISHHOOK]
+                                               case '\u1D63': 
+                                               // รƒยกร‚ยตร‚ยฃ  [LATIN SUBSCRIPT SMALL LETTER R]
+                                               case '\u1D72': 
+                                               // รƒยกร‚ยตร‚ยฒ  [LATIN SMALL LETTER R WITH MIDDLE TILDE]
+                                               case '\u1D73': 
+                                               // รƒยกร‚ยตร‚ยณ  [LATIN SMALL LETTER R WITH FISHHOOK AND MIDDLE TILDE]
+                                               case '\u1D89': 
+                                               // รƒยกร‚ยถรขโ‚ฌยฐ  [LATIN SMALL LETTER R WITH PALATAL HOOK]
+                                               case '\u1E59': 
+                                               // รƒยกร‚ยนรขโ€žยข  [LATIN SMALL LETTER R WITH DOT ABOVE]
+                                               case '\u1E5B': 
+                                               // รƒยกร‚ยนรขโ‚ฌยบ  [LATIN SMALL LETTER R WITH DOT BELOW]
+                                               case '\u1E5D': 
+                                               // รƒยกร‚ยนรฏยฟยฝ  [LATIN SMALL LETTER R WITH DOT BELOW AND MACRON]
+                                               case '\u1E5F': 
+                                               // รƒยกร‚ยนร…ยธ  [LATIN SMALL LETTER R WITH LINE BELOW]
+                                               case '\u24E1': 
+                                               // รƒยขรขโ‚ฌล“ร‚ยก  [CIRCLED LATIN SMALL LETTER R]
+                                               case '\uA75B': 
+                                               // รƒยชรฏยฟยฝรขโ‚ฌยบ  [LATIN SMALL LETTER R ROTUNDA]
+                                               case '\uA783': 
+                                               // รƒยชร…ยพร†โ€™  [LATIN SMALL LETTER INSULAR R]
+                                               case '\uFF52':  // รƒยฏร‚ยฝรขโ‚ฌโ„ข  [FULLWIDTH LATIN SMALL LETTER R]
+                                                       output[outputPos++] = 'r';
+                                                       break;
+                                               
+                                               case '\u24AD':  // รƒยขรขโ‚ฌโ„ขร‚ยญ  [PARENTHESIZED LATIN SMALL LETTER R]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'r';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u015A': 
+                                               // รƒโ€ฆร…ยก  [LATIN CAPITAL LETTER S WITH ACUTE]
+                                               case '\u015C': 
+                                               // รƒโ€ฆร…โ€œ  [LATIN CAPITAL LETTER S WITH CIRCUMFLEX]
+                                               case '\u015E': 
+                                               // รƒโ€ฆร…ยพ  [LATIN CAPITAL LETTER S WITH CEDILLA]
+                                               case '\u0160': 
+                                               // รƒโ€ฆร‚ย   [LATIN CAPITAL LETTER S WITH CARON]
+                                               case '\u0218': 
+                                               // รƒห†ร‹ล“  [LATIN CAPITAL LETTER S WITH COMMA BELOW]
+                                               case '\u1E60': 
+                                               // รƒยกร‚ยนร‚ย   [LATIN CAPITAL LETTER S WITH DOT ABOVE]
+                                               case '\u1E62': 
+                                               // รƒยกร‚ยนร‚ยข  [LATIN CAPITAL LETTER S WITH DOT BELOW]
+                                               case '\u1E64': 
+                                               // รƒยกร‚ยนร‚ยค  [LATIN CAPITAL LETTER S WITH ACUTE AND DOT ABOVE]
+                                               case '\u1E66': 
+                                               // รƒยกร‚ยนร‚ยฆ  [LATIN CAPITAL LETTER S WITH CARON AND DOT ABOVE]
+                                               case '\u1E68': 
+                                               // รƒยกร‚ยนร‚ยจ  [LATIN CAPITAL LETTER S WITH DOT BELOW AND DOT ABOVE]
+                                               case '\u24C8': 
+                                               // รƒยขรขโ‚ฌล“ร‹โ€   [CIRCLED LATIN CAPITAL LETTER S]
+                                               case '\uA731': 
+                                               // รƒยชร…โ€œร‚ยฑ  [LATIN LETTER SMALL CAPITAL S]
+                                               case '\uA785': 
+                                               // รƒยชร…ยพรขโ‚ฌยฆ  [LATIN SMALL LETTER INSULAR S]
+                                               case '\uFF33':  // รƒยฏร‚ยผร‚ยณ  [FULLWIDTH LATIN CAPITAL LETTER S]
+                                                       output[outputPos++] = 'S';
+                                                       break;
+                                               
+                                               case '\u015B': 
+                                               // รƒโ€ฆรขโ‚ฌยบ  [LATIN SMALL LETTER S WITH ACUTE]
+                                               case '\u015D': 
+                                               // รƒโ€ฆรฏยฟยฝ  [LATIN SMALL LETTER S WITH CIRCUMFLEX]
+                                               case '\u015F': 
+                                               // รƒโ€ฆร…ยธ  [LATIN SMALL LETTER S WITH CEDILLA]
+                                               case '\u0161': 
+                                               // รƒโ€ฆร‚ยก  [LATIN SMALL LETTER S WITH CARON]
+                                               case '\u017F': 
+                                               // รƒโ€ฆร‚ยฟ  http://en.wikipedia.org/wiki/Long_S  [LATIN SMALL LETTER LONG S]
+                                               case '\u0219': 
+                                               // รƒห†รขโ€žยข  [LATIN SMALL LETTER S WITH COMMA BELOW]
+                                               case '\u023F': 
+                                               // รƒห†ร‚ยฟ  [LATIN SMALL LETTER S WITH SWASH TAIL]
+                                               case '\u0282': 
+                                               // รƒล รขโ‚ฌลก  [LATIN SMALL LETTER S WITH HOOK]
+                                               case '\u1D74': 
+                                               // รƒยกร‚ยตร‚ยด  [LATIN SMALL LETTER S WITH MIDDLE TILDE]
+                                               case '\u1D8A': 
+                                               // รƒยกร‚ยถร…ย   [LATIN SMALL LETTER S WITH PALATAL HOOK]
+                                               case '\u1E61': 
+                                               // รƒยกร‚ยนร‚ยก  [LATIN SMALL LETTER S WITH DOT ABOVE]
+                                               case '\u1E63': 
+                                               // รƒยกร‚ยนร‚ยฃ  [LATIN SMALL LETTER S WITH DOT BELOW]
+                                               case '\u1E65': 
+                                               // รƒยกร‚ยนร‚ยฅ  [LATIN SMALL LETTER S WITH ACUTE AND DOT ABOVE]
+                                               case '\u1E67': 
+                                               // รƒยกร‚ยนร‚ยง  [LATIN SMALL LETTER S WITH CARON AND DOT ABOVE]
+                                               case '\u1E69': 
+                                               // รƒยกร‚ยนร‚ยฉ  [LATIN SMALL LETTER S WITH DOT BELOW AND DOT ABOVE]
+                                               case '\u1E9C': 
+                                               // รƒยกร‚ยบร…โ€œ  [LATIN SMALL LETTER LONG S WITH DIAGONAL STROKE]
+                                               case '\u1E9D': 
+                                               // รƒยกร‚ยบรฏยฟยฝ  [LATIN SMALL LETTER LONG S WITH HIGH STROKE]
+                                               case '\u24E2': 
+                                               // รƒยขรขโ‚ฌล“ร‚ยข  [CIRCLED LATIN SMALL LETTER S]
+                                               case '\uA784': 
+                                               // รƒยชร…ยพรขโ‚ฌลพ  [LATIN CAPITAL LETTER INSULAR S]
+                                               case '\uFF53':  // รƒยฏร‚ยฝรขโ‚ฌล“  [FULLWIDTH LATIN SMALL LETTER S]
+                                                       output[outputPos++] = 's';
+                                                       break;
+                                               
+                                               case '\u1E9E':  // รƒยกร‚ยบร…ยพ  [LATIN CAPITAL LETTER SHARP S]
+                                                       output[outputPos++] = 'S';
+                                                       output[outputPos++] = 'S';
+                                                       break;
+                                               
+                                               case '\u24AE':  // รƒยขรขโ‚ฌโ„ขร‚ยฎ  [PARENTHESIZED LATIN SMALL LETTER S]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 's';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u00DF':  // รƒฦ’ร…ยธ  [LATIN SMALL LETTER SHARP S]
+                                                       output[outputPos++] = 's';
+                                                       output[outputPos++] = 's';
+                                                       break;
+                                               
+                                               case '\uFB06':  // รƒยฏร‚ยฌรขโ‚ฌย   [LATIN SMALL LIGATURE ST]
+                                                       output[outputPos++] = 's';
+                                                       output[outputPos++] = 't';
+                                                       break;
+                                               
+                                               case '\u0162': 
+                                               // รƒโ€ฆร‚ยข  [LATIN CAPITAL LETTER T WITH CEDILLA]
+                                               case '\u0164': 
+                                               // รƒโ€ฆร‚ยค  [LATIN CAPITAL LETTER T WITH CARON]
+                                               case '\u0166': 
+                                               // รƒโ€ฆร‚ยฆ  [LATIN CAPITAL LETTER T WITH STROKE]
+                                               case '\u01AC': 
+                                               // รƒโ€ ร‚ยฌ  [LATIN CAPITAL LETTER T WITH HOOK]
+                                               case '\u01AE': 
+                                               // รƒโ€ ร‚ยฎ  [LATIN CAPITAL LETTER T WITH RETROFLEX HOOK]
+                                               case '\u021A': 
+                                               // รƒห†ร…ยก  [LATIN CAPITAL LETTER T WITH COMMA BELOW]
+                                               case '\u023E': 
+                                               // รƒห†ร‚ยพ  [LATIN CAPITAL LETTER T WITH DIAGONAL STROKE]
+                                               case '\u1D1B': 
+                                               // รƒยกร‚ยดรขโ‚ฌยบ  [LATIN LETTER SMALL CAPITAL T]
+                                               case '\u1E6A': 
+                                               // รƒยกร‚ยนร‚ยช  [LATIN CAPITAL LETTER T WITH DOT ABOVE]
+                                               case '\u1E6C': 
+                                               // รƒยกร‚ยนร‚ยฌ  [LATIN CAPITAL LETTER T WITH DOT BELOW]
+                                               case '\u1E6E': 
+                                               // รƒยกร‚ยนร‚ยฎ  [LATIN CAPITAL LETTER T WITH LINE BELOW]
+                                               case '\u1E70': 
+                                               // รƒยกร‚ยนร‚ยฐ  [LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW]
+                                               case '\u24C9': 
+                                               // รƒยขรขโ‚ฌล“รขโ‚ฌยฐ  [CIRCLED LATIN CAPITAL LETTER T]
+                                               case '\uA786': 
+                                               // รƒยชร…ยพรขโ‚ฌย   [LATIN CAPITAL LETTER INSULAR T]
+                                               case '\uFF34':  // รƒยฏร‚ยผร‚ยด  [FULLWIDTH LATIN CAPITAL LETTER T]
+                                                       output[outputPos++] = 'T';
+                                                       break;
+                                               
+                                               case '\u0163': 
+                                               // รƒโ€ฆร‚ยฃ  [LATIN SMALL LETTER T WITH CEDILLA]
+                                               case '\u0165': 
+                                               // รƒโ€ฆร‚ยฅ  [LATIN SMALL LETTER T WITH CARON]
+                                               case '\u0167': 
+                                               // รƒโ€ฆร‚ยง  [LATIN SMALL LETTER T WITH STROKE]
+                                               case '\u01AB': 
+                                               // รƒโ€ ร‚ยซ  [LATIN SMALL LETTER T WITH PALATAL HOOK]
+                                               case '\u01AD': 
+                                               // รƒโ€ ร‚ยญ  [LATIN SMALL LETTER T WITH HOOK]
+                                               case '\u021B': 
+                                               // รƒห†รขโ‚ฌยบ  [LATIN SMALL LETTER T WITH COMMA BELOW]
+                                               case '\u0236': 
+                                               // รƒห†ร‚ยถ  [LATIN SMALL LETTER T WITH CURL]
+                                               case '\u0287': 
+                                               // รƒล รขโ‚ฌยก  [LATIN SMALL LETTER TURNED T]
+                                               case '\u0288': 
+                                               // รƒล ร‹โ€   [LATIN SMALL LETTER T WITH RETROFLEX HOOK]
+                                               case '\u1D75': 
+                                               // รƒยกร‚ยตร‚ยต  [LATIN SMALL LETTER T WITH MIDDLE TILDE]
+                                               case '\u1E6B': 
+                                               // รƒยกร‚ยนร‚ยซ  [LATIN SMALL LETTER T WITH DOT ABOVE]
+                                               case '\u1E6D': 
+                                               // รƒยกร‚ยนร‚ยญ  [LATIN SMALL LETTER T WITH DOT BELOW]
+                                               case '\u1E6F': 
+                                               // รƒยกร‚ยนร‚ยฏ  [LATIN SMALL LETTER T WITH LINE BELOW]
+                                               case '\u1E71': 
+                                               // รƒยกร‚ยนร‚ยฑ  [LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW]
+                                               case '\u1E97': 
+                                               // รƒยกร‚ยบรขโ‚ฌโ€  [LATIN SMALL LETTER T WITH DIAERESIS]
+                                               case '\u24E3': 
+                                               // รƒยขรขโ‚ฌล“ร‚ยฃ  [CIRCLED LATIN SMALL LETTER T]
+                                               case '\u2C66': 
+                                               // รƒยขร‚ยฑร‚ยฆ  [LATIN SMALL LETTER T WITH DIAGONAL STROKE]
+                                               case '\uFF54':  // รƒยฏร‚ยฝรฏยฟยฝ?  [FULLWIDTH LATIN SMALL LETTER T]
+                                                       output[outputPos++] = 't';
+                                                       break;
+                                               
+                                               case '\u00DE': 
+                                               // รƒฦ’ร…ยพ  [LATIN CAPITAL LETTER THORN]
+                                               case '\uA766':  // รƒยชรฏยฟยฝร‚ยฆ  [LATIN CAPITAL LETTER THORN WITH STROKE THROUGH DESCENDER]
+                                                       output[outputPos++] = 'T';
+                                                       output[outputPos++] = 'H';
+                                                       break;
+                                               
+                                               case '\uA728':  // รƒยชร…โ€œร‚ยจ  [LATIN CAPITAL LETTER TZ]
+                                                       output[outputPos++] = 'T';
+                                                       output[outputPos++] = 'Z';
+                                                       break;
+                                               
+                                               case '\u24AF':  // รƒยขรขโ‚ฌโ„ขร‚ยฏ  [PARENTHESIZED LATIN SMALL LETTER T]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 't';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u02A8':  // รƒล ร‚ยจ  [LATIN SMALL LETTER TC DIGRAPH WITH CURL]
+                                                       output[outputPos++] = 't';
+                                                       output[outputPos++] = 'c';
+                                                       break;
+                                               
+                                               case '\u00FE': 
+                                               // รƒฦ’ร‚ยพ  [LATIN SMALL LETTER THORN]
+                                               case '\u1D7A': 
+                                               // รƒยกร‚ยตร‚ยบ  [LATIN SMALL LETTER TH WITH STRIKETHROUGH]
+                                               case '\uA767':  // รƒยชรฏยฟยฝร‚ยง  [LATIN SMALL LETTER THORN WITH STROKE THROUGH DESCENDER]
+                                                       output[outputPos++] = 't';
+                                                       output[outputPos++] = 'h';
+                                                       break;
+                                               
+                                               case '\u02A6':  // รƒล ร‚ยฆ  [LATIN SMALL LETTER TS DIGRAPH]
+                                                       output[outputPos++] = 't';
+                                                       output[outputPos++] = 's';
+                                                       break;
+                                               
+                                               case '\uA729':  // รƒยชร…โ€œร‚ยฉ  [LATIN SMALL LETTER TZ]
+                                                       output[outputPos++] = 't';
+                                                       output[outputPos++] = 'z';
+                                                       break;
+                                               
+                                               case '\u00D9': 
+                                               // รƒฦ’รขโ€žยข  [LATIN CAPITAL LETTER U WITH GRAVE]
+                                               case '\u00DA': 
+                                               // รƒฦ’ร…ยก  [LATIN CAPITAL LETTER U WITH ACUTE]
+                                               case '\u00DB': 
+                                               // รƒฦ’รขโ‚ฌยบ  [LATIN CAPITAL LETTER U WITH CIRCUMFLEX]
+                                               case '\u00DC': 
+                                               // รƒฦ’ร…โ€œ  [LATIN CAPITAL LETTER U WITH DIAERESIS]
+                                               case '\u0168': 
+                                               // รƒโ€ฆร‚ยจ  [LATIN CAPITAL LETTER U WITH TILDE]
+                                               case '\u016A': 
+                                               // รƒโ€ฆร‚ยช  [LATIN CAPITAL LETTER U WITH MACRON]
+                                               case '\u016C': 
+                                               // รƒโ€ฆร‚ยฌ  [LATIN CAPITAL LETTER U WITH BREVE]
+                                               case '\u016E': 
+                                               // รƒโ€ฆร‚ยฎ  [LATIN CAPITAL LETTER U WITH RING ABOVE]
+                                               case '\u0170': 
+                                               // รƒโ€ฆร‚ยฐ  [LATIN CAPITAL LETTER U WITH DOUBLE ACUTE]
+                                               case '\u0172': 
+                                               // รƒโ€ฆร‚ยฒ  [LATIN CAPITAL LETTER U WITH OGONEK]
+                                               case '\u01AF': 
+                                               // รƒโ€ ร‚ยฏ  [LATIN CAPITAL LETTER U WITH HORN]
+                                               case '\u01D3': 
+                                               // รƒโ€กรขโ‚ฌล“  [LATIN CAPITAL LETTER U WITH CARON]
+                                               case '\u01D5': 
+                                               // รƒโ€กรขโ‚ฌยข  [LATIN CAPITAL LETTER U WITH DIAERESIS AND MACRON]
+                                               case '\u01D7': 
+                                               // รƒโ€กรขโ‚ฌโ€  [LATIN CAPITAL LETTER U WITH DIAERESIS AND ACUTE]
+                                               case '\u01D9': 
+                                               // รƒโ€กรขโ€žยข  [LATIN CAPITAL LETTER U WITH DIAERESIS AND CARON]
+                                               case '\u01DB': 
+                                               // รƒโ€กรขโ‚ฌยบ  [LATIN CAPITAL LETTER U WITH DIAERESIS AND GRAVE]
+                                               case '\u0214': 
+                                               // รƒห†รฏยฟยฝ?  [LATIN CAPITAL LETTER U WITH DOUBLE GRAVE]
+                                               case '\u0216': 
+                                               // รƒห†รขโ‚ฌโ€œ  [LATIN CAPITAL LETTER U WITH INVERTED BREVE]
+                                               case '\u0244': 
+                                               // รƒโ€ฐรขโ‚ฌลพ  [LATIN CAPITAL LETTER U BAR]
+                                               case '\u1D1C': 
+                                               // รƒยกร‚ยดร…โ€œ  [LATIN LETTER SMALL CAPITAL U]
+                                               case '\u1D7E': 
+                                               // รƒยกร‚ยตร‚ยพ  [LATIN SMALL CAPITAL LETTER U WITH STROKE]
+                                               case '\u1E72': 
+                                               // รƒยกร‚ยนร‚ยฒ  [LATIN CAPITAL LETTER U WITH DIAERESIS BELOW]
+                                               case '\u1E74': 
+                                               // รƒยกร‚ยนร‚ยด  [LATIN CAPITAL LETTER U WITH TILDE BELOW]
+                                               case '\u1E76': 
+                                               // รƒยกร‚ยนร‚ยถ  [LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW]
+                                               case '\u1E78': 
+                                               // รƒยกร‚ยนร‚ยธ  [LATIN CAPITAL LETTER U WITH TILDE AND ACUTE]
+                                               case '\u1E7A': 
+                                               // รƒยกร‚ยนร‚ยบ  [LATIN CAPITAL LETTER U WITH MACRON AND DIAERESIS]
+                                               case '\u1EE4': 
+                                               // รƒยกร‚ยปร‚ยค  [LATIN CAPITAL LETTER U WITH DOT BELOW]
+                                               case '\u1EE6': 
+                                               // รƒยกร‚ยปร‚ยฆ  [LATIN CAPITAL LETTER U WITH HOOK ABOVE]
+                                               case '\u1EE8': 
+                                               // รƒยกร‚ยปร‚ยจ  [LATIN CAPITAL LETTER U WITH HORN AND ACUTE]
+                                               case '\u1EEA': 
+                                               // รƒยกร‚ยปร‚ยช  [LATIN CAPITAL LETTER U WITH HORN AND GRAVE]
+                                               case '\u1EEC': 
+                                               // รƒยกร‚ยปร‚ยฌ  [LATIN CAPITAL LETTER U WITH HORN AND HOOK ABOVE]
+                                               case '\u1EEE': 
+                                               // รƒยกร‚ยปร‚ยฎ  [LATIN CAPITAL LETTER U WITH HORN AND TILDE]
+                                               case '\u1EF0': 
+                                               // รƒยกร‚ยปร‚ยฐ  [LATIN CAPITAL LETTER U WITH HORN AND DOT BELOW]
+                                               case '\u24CA': 
+                                               // รƒยขรขโ‚ฌล“ร…ย   [CIRCLED LATIN CAPITAL LETTER U]
+                                               case '\uFF35':  // รƒยฏร‚ยผร‚ยต  [FULLWIDTH LATIN CAPITAL LETTER U]
+                                                       output[outputPos++] = 'U';
+                                                       break;
+                                               
+                                               case '\u00F9': 
+                                               // รƒฦ’ร‚ยน  [LATIN SMALL LETTER U WITH GRAVE]
+                                               case '\u00FA': 
+                                               // รƒฦ’ร‚ยบ  [LATIN SMALL LETTER U WITH ACUTE]
+                                               case '\u00FB': 
+                                               // รƒฦ’ร‚ยป  [LATIN SMALL LETTER U WITH CIRCUMFLEX]
+                                               case '\u00FC': 
+                                               // รƒฦ’ร‚ยผ  [LATIN SMALL LETTER U WITH DIAERESIS]
+                                               case '\u0169': 
+                                               // รƒโ€ฆร‚ยฉ  [LATIN SMALL LETTER U WITH TILDE]
+                                               case '\u016B': 
+                                               // รƒโ€ฆร‚ยซ  [LATIN SMALL LETTER U WITH MACRON]
+                                               case '\u016D': 
+                                               // รƒโ€ฆร‚ยญ  [LATIN SMALL LETTER U WITH BREVE]
+                                               case '\u016F': 
+                                               // รƒโ€ฆร‚ยฏ  [LATIN SMALL LETTER U WITH RING ABOVE]
+                                               case '\u0171': 
+                                               // รƒโ€ฆร‚ยฑ  [LATIN SMALL LETTER U WITH DOUBLE ACUTE]
+                                               case '\u0173': 
+                                               // รƒโ€ฆร‚ยณ  [LATIN SMALL LETTER U WITH OGONEK]
+                                               case '\u01B0': 
+                                               // รƒโ€ ร‚ยฐ  [LATIN SMALL LETTER U WITH HORN]
+                                               case '\u01D4': 
+                                               // รƒโ€กรฏยฟยฝ?  [LATIN SMALL LETTER U WITH CARON]
+                                               case '\u01D6': 
+                                               // รƒโ€กรขโ‚ฌโ€œ  [LATIN SMALL LETTER U WITH DIAERESIS AND MACRON]
+                                               case '\u01D8': 
+                                               // รƒโ€กร‹ล“  [LATIN SMALL LETTER U WITH DIAERESIS AND ACUTE]
+                                               case '\u01DA': 
+                                               // รƒโ€กร…ยก  [LATIN SMALL LETTER U WITH DIAERESIS AND CARON]
+                                               case '\u01DC': 
+                                               // รƒโ€กร…โ€œ  [LATIN SMALL LETTER U WITH DIAERESIS AND GRAVE]
+                                               case '\u0215': 
+                                               // รƒห†รขโ‚ฌยข  [LATIN SMALL LETTER U WITH DOUBLE GRAVE]
+                                               case '\u0217': 
+                                               // รƒห†รขโ‚ฌโ€  [LATIN SMALL LETTER U WITH INVERTED BREVE]
+                                               case '\u0289': 
+                                               // รƒล รขโ‚ฌยฐ  [LATIN SMALL LETTER U BAR]
+                                               case '\u1D64': 
+                                               // รƒยกร‚ยตร‚ยค  [LATIN SUBSCRIPT SMALL LETTER U]
+                                               case '\u1D99': 
+                                               // รƒยกร‚ยถรขโ€žยข  [LATIN SMALL LETTER U WITH RETROFLEX HOOK]
+                                               case '\u1E73': 
+                                               // รƒยกร‚ยนร‚ยณ  [LATIN SMALL LETTER U WITH DIAERESIS BELOW]
+                                               case '\u1E75': 
+                                               // รƒยกร‚ยนร‚ยต  [LATIN SMALL LETTER U WITH TILDE BELOW]
+                                               case '\u1E77': 
+                                               // รƒยกร‚ยนร‚ยท  [LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW]
+                                               case '\u1E79': 
+                                               // รƒยกร‚ยนร‚ยน  [LATIN SMALL LETTER U WITH TILDE AND ACUTE]
+                                               case '\u1E7B': 
+                                               // รƒยกร‚ยนร‚ยป  [LATIN SMALL LETTER U WITH MACRON AND DIAERESIS]
+                                               case '\u1EE5': 
+                                               // รƒยกร‚ยปร‚ยฅ  [LATIN SMALL LETTER U WITH DOT BELOW]
+                                               case '\u1EE7': 
+                                               // รƒยกร‚ยปร‚ยง  [LATIN SMALL LETTER U WITH HOOK ABOVE]
+                                               case '\u1EE9': 
+                                               // รƒยกร‚ยปร‚ยฉ  [LATIN SMALL LETTER U WITH HORN AND ACUTE]
+                                               case '\u1EEB': 
+                                               // รƒยกร‚ยปร‚ยซ  [LATIN SMALL LETTER U WITH HORN AND GRAVE]
+                                               case '\u1EED': 
+                                               // รƒยกร‚ยปร‚ยญ  [LATIN SMALL LETTER U WITH HORN AND HOOK ABOVE]
+                                               case '\u1EEF': 
+                                               // รƒยกร‚ยปร‚ยฏ  [LATIN SMALL LETTER U WITH HORN AND TILDE]
+                                               case '\u1EF1': 
+                                               // รƒยกร‚ยปร‚ยฑ  [LATIN SMALL LETTER U WITH HORN AND DOT BELOW]
+                                               case '\u24E4': 
+                                               // รƒยขรขโ‚ฌล“ร‚ยค  [CIRCLED LATIN SMALL LETTER U]
+                                               case '\uFF55':  // รƒยฏร‚ยฝรขโ‚ฌยข  [FULLWIDTH LATIN SMALL LETTER U]
+                                                       output[outputPos++] = 'u';
+                                                       break;
+                                               
+                                               case '\u24B0':  // รƒยขรขโ‚ฌโ„ขร‚ยฐ  [PARENTHESIZED LATIN SMALL LETTER U]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'u';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u1D6B':  // รƒยกร‚ยตร‚ยซ  [LATIN SMALL LETTER UE]
+                                                       output[outputPos++] = 'u';
+                                                       output[outputPos++] = 'e';
+                                                       break;
+                                               
+                                               case '\u01B2': 
+                                               // รƒโ€ ร‚ยฒ  [LATIN CAPITAL LETTER V WITH HOOK]
+                                               case '\u0245': 
+                                               // รƒโ€ฐรขโ‚ฌยฆ  [LATIN CAPITAL LETTER TURNED V]
+                                               case '\u1D20': 
+                                               // รƒยกร‚ยดร‚ย   [LATIN LETTER SMALL CAPITAL V]
+                                               case '\u1E7C': 
+                                               // รƒยกร‚ยนร‚ยผ  [LATIN CAPITAL LETTER V WITH TILDE]
+                                               case '\u1E7E': 
+                                               // รƒยกร‚ยนร‚ยพ  [LATIN CAPITAL LETTER V WITH DOT BELOW]
+                                               case '\u1EFC': 
+                                               // รƒยกร‚ยปร‚ยผ  [LATIN CAPITAL LETTER MIDDLE-WELSH V]
+                                               case '\u24CB': 
+                                               // รƒยขรขโ‚ฌล“รขโ‚ฌยน  [CIRCLED LATIN CAPITAL LETTER V]
+                                               case '\uA75E': 
+                                               // รƒยชรฏยฟยฝร…ยพ  [LATIN CAPITAL LETTER V WITH DIAGONAL STROKE]
+                                               case '\uA768': 
+                                               // รƒยชรฏยฟยฝร‚ยจ  [LATIN CAPITAL LETTER VEND]
+                                               case '\uFF36':  // รƒยฏร‚ยผร‚ยถ  [FULLWIDTH LATIN CAPITAL LETTER V]
+                                                       output[outputPos++] = 'V';
+                                                       break;
+                                               
+                                               case '\u028B': 
+                                               // รƒล รขโ‚ฌยน  [LATIN SMALL LETTER V WITH HOOK]
+                                               case '\u028C': 
+                                               // รƒล ร…โ€™  [LATIN SMALL LETTER TURNED V]
+                                               case '\u1D65': 
+                                               // รƒยกร‚ยตร‚ยฅ  [LATIN SUBSCRIPT SMALL LETTER V]
+                                               case '\u1D8C': 
+                                               // รƒยกร‚ยถร…โ€™  [LATIN SMALL LETTER V WITH PALATAL HOOK]
+                                               case '\u1E7D': 
+                                               // รƒยกร‚ยนร‚ยฝ  [LATIN SMALL LETTER V WITH TILDE]
+                                               case '\u1E7F': 
+                                               // รƒยกร‚ยนร‚ยฟ  [LATIN SMALL LETTER V WITH DOT BELOW]
+                                               case '\u24E5': 
+                                               // รƒยขรขโ‚ฌล“ร‚ยฅ  [CIRCLED LATIN SMALL LETTER V]
+                                               case '\u2C71': 
+                                               // รƒยขร‚ยฑร‚ยฑ  [LATIN SMALL LETTER V WITH RIGHT HOOK]
+                                               case '\u2C74': 
+                                               // รƒยขร‚ยฑร‚ยด  [LATIN SMALL LETTER V WITH CURL]
+                                               case '\uA75F': 
+                                               // รƒยชรฏยฟยฝร…ยธ  [LATIN SMALL LETTER V WITH DIAGONAL STROKE]
+                                               case '\uFF56':  // รƒยฏร‚ยฝรขโ‚ฌโ€œ  [FULLWIDTH LATIN SMALL LETTER V]
+                                                       output[outputPos++] = 'v';
+                                                       break;
+                                               
+                                               case '\uA760':  // รƒยชรฏยฟยฝร‚ย   [LATIN CAPITAL LETTER VY]
+                                                       output[outputPos++] = 'V';
+                                                       output[outputPos++] = 'Y';
+                                                       break;
+                                               
+                                               case '\u24B1':  // ⒱  [PARENTHESIZED LATIN SMALL LETTER V]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'v';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\uA761':  // ꝡ  [LATIN SMALL LETTER VY]
+                                                       output[outputPos++] = 'v';
+                                                       output[outputPos++] = 'y';
+                                                       break;
+                                               
+                                               case '\u0174': 
+                                               // Ŵ  [LATIN CAPITAL LETTER W WITH CIRCUMFLEX]
+                                               case '\u01F7': 
+                                               // Ƿ  http://en.wikipedia.org/wiki/Wynn  [LATIN CAPITAL LETTER WYNN]
+                                               case '\u1D21': 
+                                               // ᴡ  [LATIN LETTER SMALL CAPITAL W]
+                                               case '\u1E80': 
+                                               // Ẁ  [LATIN CAPITAL LETTER W WITH GRAVE]
+                                               case '\u1E82': 
+                                               // Ẃ  [LATIN CAPITAL LETTER W WITH ACUTE]
+                                               case '\u1E84': 
+                                               // Ẅ  [LATIN CAPITAL LETTER W WITH DIAERESIS]
+                                               case '\u1E86': 
+                                               // Ẇ  [LATIN CAPITAL LETTER W WITH DOT ABOVE]
+                                               case '\u1E88': 
+                                               // Ẉ  [LATIN CAPITAL LETTER W WITH DOT BELOW]
+                                               case '\u24CC': 
+                                               // Ⓦ  [CIRCLED LATIN CAPITAL LETTER W]
+                                               case '\u2C72': 
+                                               // Ⱳ  [LATIN CAPITAL LETTER W WITH HOOK]
+                                               case '\uFF37':  // Ｗ  [FULLWIDTH LATIN CAPITAL LETTER W]
+                                                       output[outputPos++] = 'W';
+                                                       break;
+                                               
+                                               case '\u0175': 
+                                               // ŵ  [LATIN SMALL LETTER W WITH CIRCUMFLEX]
+                                               case '\u01BF': 
+                                               // ƿ  http://en.wikipedia.org/wiki/Wynn  [LATIN LETTER WYNN]
+                                               case '\u028D': 
+                                               // ʍ  [LATIN SMALL LETTER TURNED W]
+                                               case '\u1E81': 
+                                               // ẁ  [LATIN SMALL LETTER W WITH GRAVE]
+                                               case '\u1E83': 
+                                               // ẃ  [LATIN SMALL LETTER W WITH ACUTE]
+                                               case '\u1E85': 
+                                               // ẅ  [LATIN SMALL LETTER W WITH DIAERESIS]
+                                               case '\u1E87': 
+                                               // ẇ  [LATIN SMALL LETTER W WITH DOT ABOVE]
+                                               case '\u1E89': 
+                                               // ẉ  [LATIN SMALL LETTER W WITH DOT BELOW]
+                                               case '\u1E98': 
+                                               // ẘ  [LATIN SMALL LETTER W WITH RING ABOVE]
+                                               case '\u24E6': 
+                                               // ⓦ  [CIRCLED LATIN SMALL LETTER W]
+                                               case '\u2C73': 
+                                               // ⱳ  [LATIN SMALL LETTER W WITH HOOK]
+                                               case '\uFF57':  // ｗ  [FULLWIDTH LATIN SMALL LETTER W]
+                                                       output[outputPos++] = 'w';
+                                                       break;
+                                               
+                                               case '\u24B2':  // ⒲  [PARENTHESIZED LATIN SMALL LETTER W]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'w';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u1E8A': 
+                                               // Ẋ  [LATIN CAPITAL LETTER X WITH DOT ABOVE]
+                                               case '\u1E8C': 
+                                               // Ẍ  [LATIN CAPITAL LETTER X WITH DIAERESIS]
+                                               case '\u24CD': 
+                                               // Ⓧ  [CIRCLED LATIN CAPITAL LETTER X]
+                                               case '\uFF38':  // Ｘ  [FULLWIDTH LATIN CAPITAL LETTER X]
+                                                       output[outputPos++] = 'X';
+                                                       break;
+                                               
+                                               case '\u1D8D': 
+                                               // ᶍ  [LATIN SMALL LETTER X WITH PALATAL HOOK]
+                                               case '\u1E8B': 
+                                               // ẋ  [LATIN SMALL LETTER X WITH DOT ABOVE]
+                                               case '\u1E8D': 
+                                               // ẍ  [LATIN SMALL LETTER X WITH DIAERESIS]
+                                               case '\u2093': 
+                                               // ₓ  [LATIN SUBSCRIPT SMALL LETTER X]
+                                               case '\u24E7': 
+                                               // ⓧ  [CIRCLED LATIN SMALL LETTER X]
+                                               case '\uFF58':  // ｘ  [FULLWIDTH LATIN SMALL LETTER X]
+                                                       output[outputPos++] = 'x';
+                                                       break;
+                                               
+                                               case '\u24B3':  // ⒳  [PARENTHESIZED LATIN SMALL LETTER X]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'x';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u00DD': 
+                                               // Ý  [LATIN CAPITAL LETTER Y WITH ACUTE]
+                                               case '\u0176': 
+                                               // Ŷ  [LATIN CAPITAL LETTER Y WITH CIRCUMFLEX]
+                                               case '\u0178': 
+                                               // Ÿ  [LATIN CAPITAL LETTER Y WITH DIAERESIS]
+                                               case '\u01B3': 
+                                               // Ƴ  [LATIN CAPITAL LETTER Y WITH HOOK]
+                                               case '\u0232': 
+                                               // Ȳ  [LATIN CAPITAL LETTER Y WITH MACRON]
+                                               case '\u024E': 
+                                               // Ɏ  [LATIN CAPITAL LETTER Y WITH STROKE]
+                                               case '\u028F': 
+                                               // ʏ  [LATIN LETTER SMALL CAPITAL Y]
+                                               case '\u1E8E': 
+                                               // Ẏ  [LATIN CAPITAL LETTER Y WITH DOT ABOVE]
+                                               case '\u1EF2': 
+                                               // Ỳ  [LATIN CAPITAL LETTER Y WITH GRAVE]
+                                               case '\u1EF4': 
+                                               // Ỵ  [LATIN CAPITAL LETTER Y WITH DOT BELOW]
+                                               case '\u1EF6': 
+                                               // Ỷ  [LATIN CAPITAL LETTER Y WITH HOOK ABOVE]
+                                               case '\u1EF8': 
+                                               // Ỹ  [LATIN CAPITAL LETTER Y WITH TILDE]
+                                               case '\u1EFE': 
+                                               // Ỿ  [LATIN CAPITAL LETTER Y WITH LOOP]
+                                               case '\u24CE': 
+                                               // Ⓨ  [CIRCLED LATIN CAPITAL LETTER Y]
+                                               case '\uFF39':  // Ｙ  [FULLWIDTH LATIN CAPITAL LETTER Y]
+                                                       output[outputPos++] = 'Y';
+                                                       break;
+                                               
+                                               case '\u00FD': 
+                                               // ý  [LATIN SMALL LETTER Y WITH ACUTE]
+                                               case '\u00FF': 
+                                               // ÿ  [LATIN SMALL LETTER Y WITH DIAERESIS]
+                                               case '\u0177': 
+                                               // ŷ  [LATIN SMALL LETTER Y WITH CIRCUMFLEX]
+                                               case '\u01B4': 
+                                               // ƴ  [LATIN SMALL LETTER Y WITH HOOK]
+                                               case '\u0233': 
+                                               // ȳ  [LATIN SMALL LETTER Y WITH MACRON]
+                                               case '\u024F': 
+                                               // ɏ  [LATIN SMALL LETTER Y WITH STROKE]
+                                               case '\u028E': 
+                                               // ʎ  [LATIN SMALL LETTER TURNED Y]
+                                               case '\u1E8F': 
+                                               // ẏ  [LATIN SMALL LETTER Y WITH DOT ABOVE]
+                                               case '\u1E99': 
+                                               // ẙ  [LATIN SMALL LETTER Y WITH RING ABOVE]
+                                               case '\u1EF3': 
+                                               // ỳ  [LATIN SMALL LETTER Y WITH GRAVE]
+                                               case '\u1EF5': 
+                                               // ỵ  [LATIN SMALL LETTER Y WITH DOT BELOW]
+                                               case '\u1EF7': 
+                                               // ỷ  [LATIN SMALL LETTER Y WITH HOOK ABOVE]
+                                               case '\u1EF9': 
+                                               // ỹ  [LATIN SMALL LETTER Y WITH TILDE]
+                                               case '\u1EFF': 
+                                               // ỿ  [LATIN SMALL LETTER Y WITH LOOP]
+                                               case '\u24E8': 
+                                               // ⓨ  [CIRCLED LATIN SMALL LETTER Y]
+                                               case '\uFF59':  // ｙ  [FULLWIDTH LATIN SMALL LETTER Y]
+                                                       output[outputPos++] = 'y';
+                                                       break;
+                                               
+                                               case '\u24B4':  // ⒴  [PARENTHESIZED LATIN SMALL LETTER Y]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'y';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u0179': 
+                                               // Ź  [LATIN CAPITAL LETTER Z WITH ACUTE]
+                                               case '\u017B': 
+                                               // Ż  [LATIN CAPITAL LETTER Z WITH DOT ABOVE]
+                                               case '\u017D': 
+                                               // Ž  [LATIN CAPITAL LETTER Z WITH CARON]
+                                               case '\u01B5': 
+                                               // Ƶ  [LATIN CAPITAL LETTER Z WITH STROKE]
+                                               case '\u021C': 
+                                               // Ȝ  http://en.wikipedia.org/wiki/Yogh  [LATIN CAPITAL LETTER YOGH]
+                                               case '\u0224': 
+                                               // Ȥ  [LATIN CAPITAL LETTER Z WITH HOOK]
+                                               case '\u1D22': 
+                                               // ᴢ  [LATIN LETTER SMALL CAPITAL Z]
+                                               case '\u1E90': 
+                                               // Ẑ  [LATIN CAPITAL LETTER Z WITH CIRCUMFLEX]
+                                               case '\u1E92': 
+                                               // Ẓ  [LATIN CAPITAL LETTER Z WITH DOT BELOW]
+                                               case '\u1E94': 
+                                               // Ẕ  [LATIN CAPITAL LETTER Z WITH LINE BELOW]
+                                               case '\u24CF': 
+                                               // Ⓩ  [CIRCLED LATIN CAPITAL LETTER Z]
+                                               case '\u2C6B': 
+                                               // Ⱬ  [LATIN CAPITAL LETTER Z WITH DESCENDER]
+                                               case '\uA762': 
+                                               // Ꝣ  [LATIN CAPITAL LETTER VISIGOTHIC Z]
+                                               case '\uFF3A':  // Ｚ  [FULLWIDTH LATIN CAPITAL LETTER Z]
+                                                       output[outputPos++] = 'Z';
+                                                       break;
+                                               
+                                               case '\u017A': 
+                                               // ź  [LATIN SMALL LETTER Z WITH ACUTE]
+                                               case '\u017C': 
+                                               // ż  [LATIN SMALL LETTER Z WITH DOT ABOVE]
+                                               case '\u017E': 
+                                               // ž  [LATIN SMALL LETTER Z WITH CARON]
+                                               case '\u01B6': 
+                                               // ƶ  [LATIN SMALL LETTER Z WITH STROKE]
+                                               case '\u021D': 
+                                               // ȝ  http://en.wikipedia.org/wiki/Yogh  [LATIN SMALL LETTER YOGH]
+                                               case '\u0225': 
+                                               // ȥ  [LATIN SMALL LETTER Z WITH HOOK]
+                                               case '\u0240': 
+                                               // ɀ  [LATIN SMALL LETTER Z WITH SWASH TAIL]
+                                               case '\u0290': 
+                                               // ʐ  [LATIN SMALL LETTER Z WITH RETROFLEX HOOK]
+                                               case '\u0291': 
+                                               // ʑ  [LATIN SMALL LETTER Z WITH CURL]
+                                               case '\u1D76': 
+                                               // ᵶ  [LATIN SMALL LETTER Z WITH MIDDLE TILDE]
+                                               case '\u1D8E': 
+                                               // ᶎ  [LATIN SMALL LETTER Z WITH PALATAL HOOK]
+                                               case '\u1E91': 
+                                               // ẑ  [LATIN SMALL LETTER Z WITH CIRCUMFLEX]
+                                               case '\u1E93': 
+                                               // ẓ  [LATIN SMALL LETTER Z WITH DOT BELOW]
+                                               case '\u1E95': 
+                                               // ẕ  [LATIN SMALL LETTER Z WITH LINE BELOW]
+                                               case '\u24E9': 
+                                               // ⓩ  [CIRCLED LATIN SMALL LETTER Z]
+                                               case '\u2C6C': 
+                                               // ⱬ  [LATIN SMALL LETTER Z WITH DESCENDER]
+                                               case '\uA763': 
+                                               // ꝣ  [LATIN SMALL LETTER VISIGOTHIC Z]
+                                               case '\uFF5A':  // ｚ  [FULLWIDTH LATIN SMALL LETTER Z]
+                                                       output[outputPos++] = 'z';
+                                                       break;
+                                               
+                                               case '\u24B5':  // ⒵  [PARENTHESIZED LATIN SMALL LETTER Z]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = 'z';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u2070': 
+                                               // ⁰  [SUPERSCRIPT ZERO]
+                                               case '\u2080': 
+                                               // ₀  [SUBSCRIPT ZERO]
+                                               case '\u24EA': 
+                                               // ⓪  [CIRCLED DIGIT ZERO]
+                                               case '\u24FF': 
+                                               // ⓿  [NEGATIVE CIRCLED DIGIT ZERO]
+                                               case '\uFF10':  // ０  [FULLWIDTH DIGIT ZERO]
+                                                       output[outputPos++] = '0';
+                                                       break;
+                                               
+                                               case '\u00B9': 
+                                               // ¹  [SUPERSCRIPT ONE]
+                                               case '\u2081': 
+                                               // ₁  [SUBSCRIPT ONE]
+                                               case '\u2460': 
+                                               // ①  [CIRCLED DIGIT ONE]
+                                               case '\u24F5': 
+                                               // ⓵  [DOUBLE CIRCLED DIGIT ONE]
+                                               case '\u2776': 
+                                               // ❶  [DINGBAT NEGATIVE CIRCLED DIGIT ONE]
+                                               case '\u2780': 
+                                               // ➀  [DINGBAT CIRCLED SANS-SERIF DIGIT ONE]
+                                               case '\u278A': 
+                                               // ➊  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT ONE]
+                                               case '\uFF11':  // １  [FULLWIDTH DIGIT ONE]
+                                                       output[outputPos++] = '1';
+                                                       break;
+                                               
+                                               case '\u2488':  // ⒈  [DIGIT ONE FULL STOP]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u2474':  // ⑴  [PARENTHESIZED DIGIT ONE]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u00B2': 
+                                               // ²  [SUPERSCRIPT TWO]
+                                               case '\u2082': 
+                                               // ₂  [SUBSCRIPT TWO]
+                                               case '\u2461': 
+                                               // ②  [CIRCLED DIGIT TWO]
+                                               case '\u24F6': 
+                                               // ⓶  [DOUBLE CIRCLED DIGIT TWO]
+                                               case '\u2777': 
+                                               // ❷  [DINGBAT NEGATIVE CIRCLED DIGIT TWO]
+                                               case '\u2781': 
+                                               // ➁  [DINGBAT CIRCLED SANS-SERIF DIGIT TWO]
+                                               case '\u278B': 
+                                               // ➋  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT TWO]
+                                               case '\uFF12':  // ２  [FULLWIDTH DIGIT TWO]
+                                                       output[outputPos++] = '2';
+                                                       break;
+                                               
+                                               case '\u2489':  // ⒉  [DIGIT TWO FULL STOP]
+                                                       output[outputPos++] = '2';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u2475':  // ⑵  [PARENTHESIZED DIGIT TWO]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '2';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u00B3': 
+                                               // ³  [SUPERSCRIPT THREE]
+                                               case '\u2083': 
+                                               // ₃  [SUBSCRIPT THREE]
+                                               case '\u2462': 
+                                               // ③  [CIRCLED DIGIT THREE]
+                                               case '\u24F7': 
+                                               // ⓷  [DOUBLE CIRCLED DIGIT THREE]
+                                               case '\u2778': 
+                                               // ❸  [DINGBAT NEGATIVE CIRCLED DIGIT THREE]
+                                               case '\u2782': 
+                                               // ➂  [DINGBAT CIRCLED SANS-SERIF DIGIT THREE]
+                                               case '\u278C': 
+                                               // ➌  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT THREE]
+                                               case '\uFF13':  // ３  [FULLWIDTH DIGIT THREE]
+                                                       output[outputPos++] = '3';
+                                                       break;
+                                               
+                                               case '\u248A':  // ⒊  [DIGIT THREE FULL STOP]
+                                                       output[outputPos++] = '3';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u2476':  // ⑶  [PARENTHESIZED DIGIT THREE]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '3';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u2074': 
+                                               // ⁴  [SUPERSCRIPT FOUR]
+                                               case '\u2084': 
+                                               // ₄  [SUBSCRIPT FOUR]
+                                               case '\u2463': 
+                                               // ④  [CIRCLED DIGIT FOUR]
+                                               case '\u24F8': 
+                                               // ⓸  [DOUBLE CIRCLED DIGIT FOUR]
+                                               case '\u2779': 
+                                               // ❹  [DINGBAT NEGATIVE CIRCLED DIGIT FOUR]
+                                               case '\u2783': 
+                                               // ➃  [DINGBAT CIRCLED SANS-SERIF DIGIT FOUR]
+                                               case '\u278D': 
+                                               // ➍  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT FOUR]
+                                               case '\uFF14':  // ４  [FULLWIDTH DIGIT FOUR]
+                                                       output[outputPos++] = '4';
+                                                       break;
+                                               
+                                               case '\u248B':  // ⒋  [DIGIT FOUR FULL STOP]
+                                                       output[outputPos++] = '4';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u2477':  // ⑷  [PARENTHESIZED DIGIT FOUR]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '4';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u2075': 
+                                               // ⁵  [SUPERSCRIPT FIVE]
+                                               case '\u2085': 
+                                               // ₅  [SUBSCRIPT FIVE]
+                                               case '\u2464': 
+                                               // ⑤  [CIRCLED DIGIT FIVE]
+                                               case '\u24F9': 
+                                               // ⓹  [DOUBLE CIRCLED DIGIT FIVE]
+                                               case '\u277A': 
+                                               // ❺  [DINGBAT NEGATIVE CIRCLED DIGIT FIVE]
+                                               case '\u2784': 
+                                               // ➄  [DINGBAT CIRCLED SANS-SERIF DIGIT FIVE]
+                                               case '\u278E': 
+                                               // ➎  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT FIVE]
+                                               case '\uFF15':  // ５  [FULLWIDTH DIGIT FIVE]
+                                                       output[outputPos++] = '5';
+                                                       break;
+                                               
+                                               case '\u248C':  // ⒌  [DIGIT FIVE FULL STOP]
+                                                       output[outputPos++] = '5';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u2478':  // ⑸  [PARENTHESIZED DIGIT FIVE]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '5';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u2076': 
+                                               // ⁶  [SUPERSCRIPT SIX]
+                                               case '\u2086': 
+                                               // ₆  [SUBSCRIPT SIX]
+                                               case '\u2465': 
+                                               // ⑥  [CIRCLED DIGIT SIX]
+                                               case '\u24FA': 
+                                               // ⓺  [DOUBLE CIRCLED DIGIT SIX]
+                                               case '\u277B': 
+                                               // ❻  [DINGBAT NEGATIVE CIRCLED DIGIT SIX]
+                                               case '\u2785': 
+                                               // ➅  [DINGBAT CIRCLED SANS-SERIF DIGIT SIX]
+                                               case '\u278F': 
+                                               // ➏  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT SIX]
+                                               case '\uFF16':  // ６  [FULLWIDTH DIGIT SIX]
+                                                       output[outputPos++] = '6';
+                                                       break;
+                                               
+                                               case '\u248D':  // ⒍  [DIGIT SIX FULL STOP]
+                                                       output[outputPos++] = '6';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u2479':  // ⑹  [PARENTHESIZED DIGIT SIX]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '6';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u2077': 
+                                               // ⁷  [SUPERSCRIPT SEVEN]
+                                               case '\u2087': 
+                                               // ₇  [SUBSCRIPT SEVEN]
+                                               case '\u2466': 
+                                               // ⑦  [CIRCLED DIGIT SEVEN]
+                                               case '\u24FB': 
+                                               // ⓻  [DOUBLE CIRCLED DIGIT SEVEN]
+                                               case '\u277C': 
+                                               // ❼  [DINGBAT NEGATIVE CIRCLED DIGIT SEVEN]
+                                               case '\u2786': 
+                                               // ➆  [DINGBAT CIRCLED SANS-SERIF DIGIT SEVEN]
+                                               case '\u2790': 
+                                               // ➐  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT SEVEN]
+                                               case '\uFF17':  // ７  [FULLWIDTH DIGIT SEVEN]
+                                                       output[outputPos++] = '7';
+                                                       break;
+                                               
+                                               case '\u248E':  // ⒎  [DIGIT SEVEN FULL STOP]
+                                                       output[outputPos++] = '7';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u247A':  // ⑺  [PARENTHESIZED DIGIT SEVEN]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '7';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u2078': 
+                                               // ⁸  [SUPERSCRIPT EIGHT]
+                                               case '\u2088': 
+                                               // ₈  [SUBSCRIPT EIGHT]
+                                               case '\u2467': 
+                                               // ⑧  [CIRCLED DIGIT EIGHT]
+                                               case '\u24FC': 
+                                               // ⓼  [DOUBLE CIRCLED DIGIT EIGHT]
+                                               case '\u277D': 
+                                               // ❽  [DINGBAT NEGATIVE CIRCLED DIGIT EIGHT]
+                                               case '\u2787': 
+                                               // ➇  [DINGBAT CIRCLED SANS-SERIF DIGIT EIGHT]
+                                               case '\u2791': 
+                                               // ➑  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT EIGHT]
+                                               case '\uFF18':  // ８  [FULLWIDTH DIGIT EIGHT]
+                                                       output[outputPos++] = '8';
+                                                       break;
+                                               
+                                               case '\u248F':  // ⒏  [DIGIT EIGHT FULL STOP]
+                                                       output[outputPos++] = '8';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u247B':  // ⑻  [PARENTHESIZED DIGIT EIGHT]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '8';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u2079': 
+                                               // ⁹  [SUPERSCRIPT NINE]
+                                               case '\u2089': 
+                                               // ₉  [SUBSCRIPT NINE]
+                                               case '\u2468': 
+                                               // ⑨  [CIRCLED DIGIT NINE]
+                                               case '\u24FD': 
+                                               // ⓽  [DOUBLE CIRCLED DIGIT NINE]
+                                               case '\u277E': 
+                                               // ❾  [DINGBAT NEGATIVE CIRCLED DIGIT NINE]
+                                               case '\u2788': 
+                                               // ➈  [DINGBAT CIRCLED SANS-SERIF DIGIT NINE]
+                                               case '\u2792': 
+                                               // ➒  [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT NINE]
+                                               case '\uFF19':  // ９  [FULLWIDTH DIGIT NINE]
+                                                       output[outputPos++] = '9';
+                                                       break;
+                                               
+                                               case '\u2490':  // ⒐  [DIGIT NINE FULL STOP]
+                                                       output[outputPos++] = '9';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u247C':  // ⑼  [PARENTHESIZED DIGIT NINE]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '9';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u2469': 
+                                               // ⑩  [CIRCLED NUMBER TEN]
+                                               case '\u24FE': 
+                                               // ⓾  [DOUBLE CIRCLED NUMBER TEN]
+                                               case '\u277F': 
+                                               // ❿  [DINGBAT NEGATIVE CIRCLED NUMBER TEN]
+                                               case '\u2789': 
+                                               // ➉  [DINGBAT CIRCLED SANS-SERIF NUMBER TEN]
+                                               case '\u2793':  // ➓  [DINGBAT NEGATIVE CIRCLED SANS-SERIF NUMBER TEN]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '0';
+                                                       break;
+                                               
+                                               case '\u2491':  // ⒑  [NUMBER TEN FULL STOP]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '0';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u247D':  // ⑽  [PARENTHESIZED NUMBER TEN]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '0';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u246A': 
+                                               // ⑪  [CIRCLED NUMBER ELEVEN]
+                                               case '\u24EB':  // ⓫  [NEGATIVE CIRCLED NUMBER ELEVEN]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '1';
+                                                       break;
+                                               
+                                               case '\u2492':  // ⒒  [NUMBER ELEVEN FULL STOP]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u247E':  // ⑾  [PARENTHESIZED NUMBER ELEVEN]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u246B': 
+                                               // ⑫  [CIRCLED NUMBER TWELVE]
+                                               case '\u24EC':  // ⓬  [NEGATIVE CIRCLED NUMBER TWELVE]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '2';
+                                                       break;
+                                               
+                                               case '\u2493':  // ⒓  [NUMBER TWELVE FULL STOP]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '2';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u247F':  // ⑿  [PARENTHESIZED NUMBER TWELVE]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '2';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u246C': 
+                                               // ⑬  [CIRCLED NUMBER THIRTEEN]
+                                               case '\u24ED':  // ⓭  [NEGATIVE CIRCLED NUMBER THIRTEEN]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '3';
+                                                       break;
+                                               
+                                               case '\u2494':  // ⒔  [NUMBER THIRTEEN FULL STOP]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '3';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u2480':  // ⒀  [PARENTHESIZED NUMBER THIRTEEN]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '3';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u246D': 
+                                               // ⑭  [CIRCLED NUMBER FOURTEEN]
+                                               case '\u24EE':  // ⓮  [NEGATIVE CIRCLED NUMBER FOURTEEN]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '4';
+                                                       break;
+                                               
+                                               case '\u2495':  // ⒕  [NUMBER FOURTEEN FULL STOP]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '4';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u2481':  // ⒁  [PARENTHESIZED NUMBER FOURTEEN]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '4';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u246E': 
+                                               // ⑮  [CIRCLED NUMBER FIFTEEN]
+                                               case '\u24EF':  // ⓯  [NEGATIVE CIRCLED NUMBER FIFTEEN]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '5';
+                                                       break;
+                                               
+                                               case '\u2496':  // ⒖  [NUMBER FIFTEEN FULL STOP]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '5';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u2482':  // ⒂  [PARENTHESIZED NUMBER FIFTEEN]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '5';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u246F': 
+                                               // ⑯  [CIRCLED NUMBER SIXTEEN]
+                                               case '\u24F0':  // ⓰  [NEGATIVE CIRCLED NUMBER SIXTEEN]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '6';
+                                                       break;
+                                               
+                                               case '\u2497':  // ⒗  [NUMBER SIXTEEN FULL STOP]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '6';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u2483':  // ⒃  [PARENTHESIZED NUMBER SIXTEEN]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '6';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u2470': 
+                                               // ⑰  [CIRCLED NUMBER SEVENTEEN]
+                                               case '\u24F1':  // ⓱  [NEGATIVE CIRCLED NUMBER SEVENTEEN]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '7';
+                                                       break;
+                                               
+                                               case '\u2498':  // ⒘  [NUMBER SEVENTEEN FULL STOP]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '7';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u2484':  // ⒄  [PARENTHESIZED NUMBER SEVENTEEN]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '7';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u2471': 
+                                               // ⑱  [CIRCLED NUMBER EIGHTEEN]
+                                               case '\u24F2':  // ⓲  [NEGATIVE CIRCLED NUMBER EIGHTEEN]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '8';
+                                                       break;
+                                               
+                                               case '\u2499':  // ⒙  [NUMBER EIGHTEEN FULL STOP]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '8';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u2485':  // ⒅  [PARENTHESIZED NUMBER EIGHTEEN]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '8';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u2472': 
+                                               // ⑲  [CIRCLED NUMBER NINETEEN]
+                                               case '\u24F3':  // ⓳  [NEGATIVE CIRCLED NUMBER NINETEEN]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '9';
+                                                       break;
+                                               
+                                               case '\u249A':  // รƒยขรขโ‚ฌโ„ขร…ยก  [NUMBER NINETEEN FULL STOP]
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '9';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u2486':  // รƒยขรขโ‚ฌโ„ขรขโ‚ฌย   [PARENTHESIZED NUMBER NINETEEN]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '1';
+                                                       output[outputPos++] = '9';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u2473': 
+                                               // รƒยขรขโ‚ฌหœร‚ยณ  [CIRCLED NUMBER TWENTY]
+                                               case '\u24F4':  // รƒยขรขโ‚ฌล“ร‚ยด  [NEGATIVE CIRCLED NUMBER TWENTY]
+                                                       output[outputPos++] = '2';
+                                                       output[outputPos++] = '0';
+                                                       break;
+                                               
+                                               case '\u249B':  // รƒยขรขโ‚ฌโ„ขรขโ‚ฌยบ  [NUMBER TWENTY FULL STOP]
+                                                       output[outputPos++] = '2';
+                                                       output[outputPos++] = '0';
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u2487':  // รƒยขรขโ‚ฌโ„ขรขโ‚ฌยก  [PARENTHESIZED NUMBER TWENTY]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '2';
+                                                       output[outputPos++] = '0';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u00AB': 
+                                               // รƒโ€šร‚ยซ  [LEFT-POINTING DOUBLE ANGLE QUOTATION MARK]
+                                               case '\u00BB': 
+                                               // รƒโ€šร‚ยป  [RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK]
+                                               case '\u201C': 
+                                               // รƒยขรขโ€šยฌร…โ€œ  [LEFT DOUBLE QUOTATION MARK]
+                                               case '\u201D': 
+                                               // รƒยขรขโ€šยฌรฏยฟยฝ  [RIGHT DOUBLE QUOTATION MARK]
+                                               case '\u201E': 
+                                               // รƒยขรขโ€šยฌร…ยพ  [DOUBLE LOW-9 QUOTATION MARK]
+                                               case '\u2033': 
+                                               // รƒยขรขโ€šยฌร‚ยณ  [DOUBLE PRIME]
+                                               case '\u2036': 
+                                               // รƒยขรขโ€šยฌร‚ยถ  [REVERSED DOUBLE PRIME]
+                                               case '\u275D': 
+                                               // รƒยขรฏยฟยฝรฏยฟยฝ  [HEAVY DOUBLE TURNED COMMA QUOTATION MARK ORNAMENT]
+                                               case '\u275E': 
+                                               // รƒยขรฏยฟยฝร…ยพ  [HEAVY DOUBLE COMMA QUOTATION MARK ORNAMENT]
+                                               case '\u276E': 
+                                               // รƒยขรฏยฟยฝร‚ยฎ  [HEAVY LEFT-POINTING ANGLE QUOTATION MARK ORNAMENT]
+                                               case '\u276F': 
+                                               // รƒยขรฏยฟยฝร‚ยฏ  [HEAVY RIGHT-POINTING ANGLE QUOTATION MARK ORNAMENT]
+                                               case '\uFF02':  // รƒยฏร‚ยผรขโ‚ฌลก  [FULLWIDTH QUOTATION MARK]
+                                                       output[outputPos++] = '"';
+                                                       break;
+                                               
+                                               case '\u2018': 
+                                               // รƒยขรขโ€šยฌร‹ล“  [LEFT SINGLE QUOTATION MARK]
+                                               case '\u2019': 
+                                               // รƒยขรขโ€šยฌรขโ€žยข  [RIGHT SINGLE QUOTATION MARK]
+                                               case '\u201A': 
+                                               // รƒยขรขโ€šยฌร…ยก  [SINGLE LOW-9 QUOTATION MARK]
+                                               case '\u201B': 
+                                               // รƒยขรขโ€šยฌรขโ‚ฌยบ  [SINGLE HIGH-REVERSED-9 QUOTATION MARK]
+                                               case '\u2032': 
+                                               // รƒยขรขโ€šยฌร‚ยฒ  [PRIME]
+                                               case '\u2035': 
+                                               // รƒยขรขโ€šยฌร‚ยต  [REVERSED PRIME]
+                                               case '\u2039': 
+                                               // รƒยขรขโ€šยฌร‚ยน  [SINGLE LEFT-POINTING ANGLE QUOTATION MARK]
+                                               case '\u203A': 
+                                               // รƒยขรขโ€šยฌร‚ยบ  [SINGLE RIGHT-POINTING ANGLE QUOTATION MARK]
+                                               case '\u275B': 
+                                               // รƒยขรฏยฟยฝรขโ‚ฌยบ  [HEAVY SINGLE TURNED COMMA QUOTATION MARK ORNAMENT]
+                                               case '\u275C': 
+                                               // รƒยขรฏยฟยฝร…โ€œ  [HEAVY SINGLE COMMA QUOTATION MARK ORNAMENT]
+                                               case '\uFF07':  // รƒยฏร‚ยผรขโ‚ฌยก  [FULLWIDTH APOSTROPHE]
+                                                       output[outputPos++] = '\'';
+                                                       break;
+                                               
+                                               case '\u2010': 
+                                               // รƒยขรขโ€šยฌรฏยฟยฝ  [HYPHEN]
+                                               case '\u2011': 
+                                               // รƒยขรขโ€šยฌรขโ‚ฌหœ  [NON-BREAKING HYPHEN]
+                                               case '\u2012': 
+                                               // รƒยขรขโ€šยฌรขโ‚ฌโ„ข  [FIGURE DASH]
+                                               case '\u2013': 
+                                               // รƒยขรขโ€šยฌรขโ‚ฌล“  [EN DASH]
+                                               case '\u2014': 
+                                               // รƒยขรขโ€šยฌรฏยฟยฝ?  [EM DASH]
+                                               case '\u207B': 
+                                               // รƒยขรฏยฟยฝร‚ยป  [SUPERSCRIPT MINUS]
+                                               case '\u208B': 
+                                               // รƒยขรขโ‚ฌลกรขโ‚ฌยน  [SUBSCRIPT MINUS]
+                                               case '\uFF0D':  // รƒยฏร‚ยผรฏยฟยฝ  [FULLWIDTH HYPHEN-MINUS]
+                                                       output[outputPos++] = '-';
+                                                       break;
+                                               
+                                               case '\u2045': 
+                                               // รƒยขรฏยฟยฝรขโ‚ฌยฆ  [LEFT SQUARE BRACKET WITH QUILL]
+                                               case '\u2772': 
+                                               // รƒยขรฏยฟยฝร‚ยฒ  [LIGHT LEFT TORTOISE SHELL BRACKET ORNAMENT]
+                                               case '\uFF3B':  // รƒยฏร‚ยผร‚ยป  [FULLWIDTH LEFT SQUARE BRACKET]
+                                                       output[outputPos++] = '[';
+                                                       break;
+                                               
+                                               case '\u2046': 
+                                               // รƒยขรฏยฟยฝรขโ‚ฌย   [RIGHT SQUARE BRACKET WITH QUILL]
+                                               case '\u2773': 
+                                               // รƒยขรฏยฟยฝร‚ยณ  [LIGHT RIGHT TORTOISE SHELL BRACKET ORNAMENT]
+                                               case '\uFF3D':  // รƒยฏร‚ยผร‚ยฝ  [FULLWIDTH RIGHT SQUARE BRACKET]
+                                                       output[outputPos++] = ']';
+                                                       break;
+                                               
+                                               case '\u207D': 
+                                               // รƒยขรฏยฟยฝร‚ยฝ  [SUPERSCRIPT LEFT PARENTHESIS]
+                                               case '\u208D': 
+                                               // รƒยขรขโ‚ฌลกรฏยฟยฝ  [SUBSCRIPT LEFT PARENTHESIS]
+                                               case '\u2768': 
+                                               // รƒยขรฏยฟยฝร‚ยจ  [MEDIUM LEFT PARENTHESIS ORNAMENT]
+                                               case '\u276A': 
+                                               // รƒยขรฏยฟยฝร‚ยช  [MEDIUM FLATTENED LEFT PARENTHESIS ORNAMENT]
+                                               case '\uFF08':  // รƒยฏร‚ยผร‹โ€   [FULLWIDTH LEFT PARENTHESIS]
+                                                       output[outputPos++] = '(';
+                                                       break;
+                                               
+                                               case '\u2E28':  // รƒยขร‚ยธร‚ยจ  [LEFT DOUBLE PARENTHESIS]
+                                                       output[outputPos++] = '(';
+                                                       output[outputPos++] = '(';
+                                                       break;
+                                               
+                                               case '\u207E': 
+                                               // รƒยขรฏยฟยฝร‚ยพ  [SUPERSCRIPT RIGHT PARENTHESIS]
+                                               case '\u208E': 
+                                               // รƒยขรขโ‚ฌลกร…ยฝ  [SUBSCRIPT RIGHT PARENTHESIS]
+                                               case '\u2769': 
+                                               // รƒยขรฏยฟยฝร‚ยฉ  [MEDIUM RIGHT PARENTHESIS ORNAMENT]
+                                               case '\u276B': 
+                                               // รƒยขรฏยฟยฝร‚ยซ  [MEDIUM FLATTENED RIGHT PARENTHESIS ORNAMENT]
+                                               case '\uFF09':  // รƒยฏร‚ยผรขโ‚ฌยฐ  [FULLWIDTH RIGHT PARENTHESIS]
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u2E29':  // รƒยขร‚ยธร‚ยฉ  [RIGHT DOUBLE PARENTHESIS]
+                                                       output[outputPos++] = ')';
+                                                       output[outputPos++] = ')';
+                                                       break;
+                                               
+                                               case '\u276C': 
+                                               // รƒยขรฏยฟยฝร‚ยฌ  [MEDIUM LEFT-POINTING ANGLE BRACKET ORNAMENT]
+                                               case '\u2770': 
+                                               // รƒยขรฏยฟยฝร‚ยฐ  [HEAVY LEFT-POINTING ANGLE BRACKET ORNAMENT]
+                                               case '\uFF1C':  // รƒยฏร‚ยผร…โ€œ  [FULLWIDTH LESS-THAN SIGN]
+                                                       output[outputPos++] = '<';
+                                                       break;
+                                               
+                                               case '\u276D': 
+                                               // รƒยขรฏยฟยฝร‚ยญ  [MEDIUM RIGHT-POINTING ANGLE BRACKET ORNAMENT]
+                                               case '\u2771': 
+                                               // รƒยขรฏยฟยฝร‚ยฑ  [HEAVY RIGHT-POINTING ANGLE BRACKET ORNAMENT]
+                                               case '\uFF1E':  // รƒยฏร‚ยผร…ยพ  [FULLWIDTH GREATER-THAN SIGN]
+                                                       output[outputPos++] = '>';
+                                                       break;
+                                               
+                                               case '\u2774': 
+                                               // รƒยขรฏยฟยฝร‚ยด  [MEDIUM LEFT CURLY BRACKET ORNAMENT]
+                                               case '\uFF5B':  // รƒยฏร‚ยฝรขโ‚ฌยบ  [FULLWIDTH LEFT CURLY BRACKET]
+                                                       output[outputPos++] = '{';
+                                                       break;
+                                               
+                                               case '\u2775': 
+                                               // รƒยขรฏยฟยฝร‚ยต  [MEDIUM RIGHT CURLY BRACKET ORNAMENT]
+                                               case '\uFF5D':  // รƒยฏร‚ยฝรฏยฟยฝ  [FULLWIDTH RIGHT CURLY BRACKET]
+                                                       output[outputPos++] = '}';
+                                                       break;
+                                               
+                                               case '\u207A': 
+                                               // รƒยขรฏยฟยฝร‚ยบ  [SUPERSCRIPT PLUS SIGN]
+                                               case '\u208A': 
+                                               // รƒยขรขโ‚ฌลกร…ย   [SUBSCRIPT PLUS SIGN]
+                                               case '\uFF0B':  // รƒยฏร‚ยผรขโ‚ฌยน  [FULLWIDTH PLUS SIGN]
+                                                       output[outputPos++] = '+';
+                                                       break;
+                                               
+                                               case '\u207C': 
+                                               // รƒยขรฏยฟยฝร‚ยผ  [SUPERSCRIPT EQUALS SIGN]
+                                               case '\u208C': 
+                                               // รƒยขรขโ‚ฌลกร…โ€™  [SUBSCRIPT EQUALS SIGN]
+                                               case '\uFF1D':  // รƒยฏร‚ยผรฏยฟยฝ  [FULLWIDTH EQUALS SIGN]
+                                                       output[outputPos++] = '=';
+                                                       break;
+                                               
+                                               case '\uFF01':  // รƒยฏร‚ยผรฏยฟยฝ  [FULLWIDTH EXCLAMATION MARK]
+                                                       output[outputPos++] = '!';
+                                                       break;
+                                               
+                                               case '\u203C':  // รƒยขรขโ€šยฌร‚ยผ  [DOUBLE EXCLAMATION MARK]
+                                                       output[outputPos++] = '!';
+                                                       output[outputPos++] = '!';
+                                                       break;
+                                               
+                                               case '\u2049':  // รƒยขรฏยฟยฝรขโ‚ฌยฐ  [EXCLAMATION QUESTION MARK]
+                                                       output[outputPos++] = '!';
+                                                       output[outputPos++] = '?';
+                                                       break;
+                                               
+                                               case '\uFF03':  // รƒยฏร‚ยผร†โ€™  [FULLWIDTH NUMBER SIGN]
+                                                       output[outputPos++] = '#';
+                                                       break;
+                                               
+                                               case '\uFF04':  // รƒยฏร‚ยผรขโ‚ฌลพ  [FULLWIDTH DOLLAR SIGN]
+                                                       output[outputPos++] = '$';
+                                                       break;
+                                               
+                                               case '\u2052': 
+                                               // รƒยขรฏยฟยฝรขโ‚ฌโ„ข  [COMMERCIAL MINUS SIGN]
+                                               case '\uFF05':  // รƒยฏร‚ยผรขโ‚ฌยฆ  [FULLWIDTH PERCENT SIGN]
+                                                       output[outputPos++] = '%';
+                                                       break;
+                                               
+                                               case '\uFF06':  // รƒยฏร‚ยผรขโ‚ฌย   [FULLWIDTH AMPERSAND]
+                                                       output[outputPos++] = '&';
+                                                       break;
+                                               
+                                               case '\u204E': 
+                                               // รƒยขรฏยฟยฝร…ยฝ  [LOW ASTERISK]
+                                               case '\uFF0A':  // รƒยฏร‚ยผร…ย   [FULLWIDTH ASTERISK]
+                                                       output[outputPos++] = '*';
+                                                       break;
+                                               
+                                               case '\uFF0C':  // รƒยฏร‚ยผร…โ€™  [FULLWIDTH COMMA]
+                                                       output[outputPos++] = ',';
+                                                       break;
+                                               
+                                               case '\uFF0E':  // รƒยฏร‚ยผร…ยฝ  [FULLWIDTH FULL STOP]
+                                                       output[outputPos++] = '.';
+                                                       break;
+                                               
+                                               case '\u2044': 
+                                               // รƒยขรฏยฟยฝรขโ‚ฌลพ  [FRACTION SLASH]
+                                               case '\uFF0F':  // รƒยฏร‚ยผรฏยฟยฝ  [FULLWIDTH SOLIDUS]
+                                                       output[outputPos++] = '/';
+                                                       break;
+                                               
+                                               case '\uFF1A':  // รƒยฏร‚ยผร…ยก  [FULLWIDTH COLON]
+                                                       output[outputPos++] = ':';
+                                                       break;
+                                               
+                                               case '\u204F': 
+                                               // รƒยขรฏยฟยฝรฏยฟยฝ  [REVERSED SEMICOLON]
+                                               case '\uFF1B':  // รƒยฏร‚ยผรขโ‚ฌยบ  [FULLWIDTH SEMICOLON]
+                                                       output[outputPos++] = ';';
+                                                       break;
+                                               
+                                               case '\uFF1F':  // รƒยฏร‚ยผร…ยธ  [FULLWIDTH QUESTION MARK]
+                                                       output[outputPos++] = '?';
+                                                       break;
+                                               
+                                               case '\u2047':  // รƒยขรฏยฟยฝรขโ‚ฌยก  [DOUBLE QUESTION MARK]
+                                                       output[outputPos++] = '?';
+                                                       output[outputPos++] = '?';
+                                                       break;
+                                               
+                                               case '\u2048':  // รƒยขรฏยฟยฝร‹โ€   [QUESTION EXCLAMATION MARK]
+                                                       output[outputPos++] = '?';
+                                                       output[outputPos++] = '!';
+                                                       break;
+                                               
+                                               case '\uFF20':  // รƒยฏร‚ยผร‚ย   [FULLWIDTH COMMERCIAL AT]
+                                                       output[outputPos++] = '@';
+                                                       break;
+                                               
+                                               case '\uFF3C':  // รƒยฏร‚ยผร‚ยผ  [FULLWIDTH REVERSE SOLIDUS]
+                                                       output[outputPos++] = '\\';
+                                                       break;
+                                               
+                                               case '\u2038': 
+                                               // รƒยขรขโ€šยฌร‚ยธ  [CARET]
+                                               case '\uFF3E':  // รƒยฏร‚ยผร‚ยพ  [FULLWIDTH CIRCUMFLEX ACCENT]
+                                                       output[outputPos++] = '^';
+                                                       break;
+                                               
+                                               case '\uFF3F':  // รƒยฏร‚ยผร‚ยฟ  [FULLWIDTH LOW LINE]
+                                                       output[outputPos++] = '_';
+                                                       break;
+                                               
+                                               case '\u2053': 
+                                               // รƒยขรฏยฟยฝรขโ‚ฌล“  [SWUNG DASH]
+                                               case '\uFF5E':  // รƒยฏร‚ยฝร…ยพ  [FULLWIDTH TILDE]
+                                                       output[outputPos++] = '~';
+                                                       break;
+                                               
+                                               default: 
+                                                       output[outputPos++] = c;
+                                                       break;
+                                               
+                                       }
+                               }
+                       }
+               }
+       }
+}
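Note: the folding switch above is purely mechanical; each recognized codepoint appends its ASCII replacement character(s) to `output` and breaks, and anything unrecognized passes through via the `default` arm. A stand-alone sketch of the same idea (the names `FoldSketch` and `Fold` are illustrative only, not part of the imported filter's API):

using System;
using System.Text;

static class FoldSketch
{
    // Fold a handful of the characters handled by the switch above; anything
    // unmapped passes through unchanged, mirroring the filter's default arm.
    public static string Fold(string input)
    {
        var sb = new StringBuilder(input.Length);
        foreach (char c in input)
        {
            switch (c)
            {
                case '\u201C':  // left double quotation mark
                case '\u201D':  // right double quotation mark
                    sb.Append('"');
                    break;
                case '\u2013':  // en dash
                case '\u2014':  // em dash
                    sb.Append('-');
                    break;
                case '\u2049':  // exclamation question mark folds to two chars
                    sb.Append("!?");
                    break;
                default:
                    sb.Append(c);
                    break;
            }
        }
        return sb.ToString();
    }

    static void Main()
    {
        Console.WriteLine(Fold("\u201Cfoo\u201D \u2014 bar\u2049"));  // "foo" - bar!?
    }
}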
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Analyzer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Analyzer.cs
new file mode 100644 (file)
index 0000000..6ba6a48
--- /dev/null
@@ -0,0 +1,181 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Fieldable = Mono.Lucene.Net.Documents.Fieldable;
+using AlreadyClosedException = Mono.Lucene.Net.Store.AlreadyClosedException;
+using CloseableThreadLocal = Mono.Lucene.Net.Util.CloseableThreadLocal;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary>An Analyzer builds TokenStreams, which analyze text.  It thus represents a
+       /// policy for extracting index terms from text.
+       /// <p/>
+       /// Typical implementations first build a Tokenizer, which breaks the stream of
+       /// characters from the Reader into raw Tokens.  One or more TokenFilters may
+       /// then be applied to the output of the Tokenizer.
+       /// </summary>
+       public abstract class Analyzer
+       {
+               /// <summary>Creates a TokenStream which tokenizes all the text in the provided
+               /// Reader.  Must be able to handle null field name for
+               /// backward compatibility.
+               /// </summary>
+               public abstract TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader);
+               
+               /// <summary>Creates a TokenStream that is allowed to be re-used
+               /// from the previous time that the same thread called
+               /// this method.  Callers that do not need to use more
+               /// than one TokenStream at the same time from this
+               /// analyzer should use this method for better
+               /// performance.
+               /// </summary>
+               public virtual TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
+               {
+                       return TokenStream(fieldName, reader);
+               }
+               
+               private CloseableThreadLocal tokenStreams = new CloseableThreadLocal();
+               
+               /// <summary>Used by Analyzers that implement reusableTokenStream
+               /// to retrieve previously saved TokenStreams for re-use
+               /// by the same thread. 
+               /// </summary>
+               protected internal virtual System.Object GetPreviousTokenStream()
+               {
+                       try
+                       {
+                               return tokenStreams.Get();
+                       }
+                       catch (System.NullReferenceException npe)
+                       {
+                               if (tokenStreams == null)
+                               {
+                                       throw new AlreadyClosedException("this Analyzer is closed");
+                               }
+                               else
+                               {
+                                       throw npe;
+                               }
+                       }
+               }
+               
+               /// <summary>Used by Analyzers that implement reusableTokenStream
+               /// to save a TokenStream for later re-use by the same
+               /// thread. 
+               /// </summary>
+               protected internal virtual void  SetPreviousTokenStream(System.Object obj)
+               {
+                       try
+                       {
+                               tokenStreams.Set(obj);
+                       }
+                       catch (System.NullReferenceException npe)
+                       {
+                               if (tokenStreams == null)
+                               {
+                                       throw new AlreadyClosedException("this Analyzer is closed");
+                               }
+                               else
+                               {
+                                       throw npe;
+                               }
+                       }
+               }
+               
+               protected internal bool overridesTokenStreamMethod;
+               
+               /// <deprecated> This is only present to preserve
+               /// back-compat of classes that subclass a core analyzer
+               /// and override tokenStream but not reusableTokenStream 
+               /// </deprecated>
+        [Obsolete("This is only present to preserve back-compat of classes that subclass a core analyzer and override tokenStream but not reusableTokenStream ")]
+               protected internal virtual void  SetOverridesTokenStreamMethod(System.Type baseClass)
+               {
+                       
+                       System.Type[] params_Renamed = new System.Type[2];
+                       params_Renamed[0] = typeof(System.String);
+                       params_Renamed[1] = typeof(System.IO.TextReader);
+                       
+                       try
+                       {
+                               System.Reflection.MethodInfo m = this.GetType().GetMethod("TokenStream", params_Renamed);
+                               if (m != null)
+                               {
+                                       overridesTokenStreamMethod = m.DeclaringType != baseClass;
+                               }
+                               else
+                               {
+                                       overridesTokenStreamMethod = false;
+                               }
+                       }
+                       catch (System.MethodAccessException)
+                       {
+                               overridesTokenStreamMethod = false;
+                       }
+               }
+               
+               
+               /// <summary> Invoked before indexing a Fieldable instance if
+               /// terms have already been added to that field.  This allows custom
+               /// analyzers to place an automatic position increment gap between
+               /// Fieldable instances using the same field name.  The default value
+               /// Fieldable instances using the same field name.  The default
+               /// position increment gap is 0.  With a 0 position increment gap and
+               /// including across Fieldable instances, are in successive positions, allowing
+               /// exact PhraseQuery matches, for instance, across Fieldable instance boundaries.
+               /// 
+               /// </summary>
+               /// <param name="fieldName">Fieldable name being indexed.
+               /// </param>
+               /// <returns> position increment gap, added to the next token emitted from {@link #TokenStream(String,Reader)}
+               /// </returns>
+               public virtual int GetPositionIncrementGap(System.String fieldName)
+               {
+                       return 0;
+               }
+               
+               /// <summary> Just like {@link #getPositionIncrementGap}, except for
+               /// Token offsets instead.  By default this returns 1 for
+               /// tokenized fields, as if the fields were joined
+               /// with an extra space character, and 0 for un-tokenized
+               /// fields.  This method is only called if the field
+               /// produced at least one token for indexing.
+               /// 
+               /// </summary>
+               /// <param name="field">the field just indexed
+               /// </param>
+               /// <returns> offset gap, added to the next token emitted from {@link #TokenStream(String,Reader)}
+               /// </returns>
+               public virtual int GetOffsetGap(Fieldable field)
+               {
+                       if (field.IsTokenized())
+                               return 1;
+                       else
+                               return 0;
+               }
+               
+               /// <summary>Frees persistent resources used by this Analyzer </summary>
+               public virtual void  Close()
+               {
+                       tokenStreams.Close();
+                       tokenStreams = null;
+               }
+       }
+}
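Note: the TokenStream/ReusableTokenStream split above exists so an analyzer can keep one tokenizer per thread instead of allocating a new one per document. A minimal sketch of the intended reuse pattern, assuming the LowerCaseTokenizer class and Tokenizer.Reset(TextReader) member that ship elsewhere in this snapshot:

using System.IO;
using Mono.Lucene.Net.Analysis;

class SketchAnalyzer : Analyzer
{
    public override TokenStream TokenStream(string fieldName, TextReader reader)
    {
        return new LowerCaseTokenizer(reader);
    }

    public override TokenStream ReusableTokenStream(string fieldName, TextReader reader)
    {
        // Fetch the tokenizer this thread saved on a previous call, if any.
        Tokenizer tokenizer = (Tokenizer) GetPreviousTokenStream();
        if (tokenizer == null)
        {
            tokenizer = new LowerCaseTokenizer(reader);
            SetPreviousTokenStream(tokenizer);
        }
        else
        {
            tokenizer.Reset(reader);  // rewind the saved tokenizer onto the new reader
        }
        return tokenizer;
    }
}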
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/BaseCharFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/BaseCharFilter.cs
new file mode 100644 (file)
index 0000000..4732715
--- /dev/null
@@ -0,0 +1,97 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using Mono.Lucene.Net.Util;
+
+namespace Mono.Lucene.Net.Analysis
+{
+
+    /// <summary>
+    /// Base utility class for implementing a {@link CharFilter}.
+    /// You subclass this, and then record mappings by calling
+    /// {@link #addOffCorrectMap}, and then invoke the correct
+    /// method to correct an offset.
+    /// </summary>
+    public abstract class BaseCharFilter : CharFilter
+    {
+
+        private int[] offsets;
+        private int[] diffs;
+        private int size = 0;
+
+        public BaseCharFilter(CharStream @in) : base(@in)
+        {
+        }
+
+        /// <summary>Retrieve the corrected offset.</summary>
+        public override int Correct(int currentOff)
+        {
+            if (offsets == null || currentOff < offsets[0])
+            {
+                return currentOff;
+            }
+
+            int hi = size - 1;
+            if (currentOff >= offsets[hi])
+                return currentOff + diffs[hi];
+
+            int lo = 0;
+            int mid = -1;
+
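+            // Binary-search offsets[0..size) for the last recorded offset <= currentOff.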
+            while (hi >= lo)
+            {
+                mid = SupportClass.Number.URShift(lo + hi, 1);
+                if (currentOff < offsets[mid])
+                    hi = mid - 1;
+                else if (currentOff > offsets[mid])
+                    lo = mid + 1;
+                else
+                    return currentOff + diffs[mid];
+            }
+
+            if (currentOff < offsets[mid])
+                return mid == 0 ? currentOff : currentOff + diffs[mid - 1];
+            else
+                return currentOff + diffs[mid];
+        }
+
+        protected int GetLastCumulativeDiff()
+        {
+            return offsets == null ?
+              0 : diffs[size - 1];
+        }
+
+        protected void AddOffCorrectMap(int off, int cumulativeDiff)
+        {
+            if (offsets == null)
+            {
+                offsets = new int[64];
+                diffs = new int[64];
+            }
+            else if (size == offsets.Length)
+            {
+                offsets = ArrayUtil.Grow(offsets);
+                diffs = ArrayUtil.Grow(diffs);
+            }
+
+            offsets[size] = off;
+            diffs[size++] = cumulativeDiff;
+        }
+    }
+}
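Note: AddOffCorrectMap records (offset, cumulativeDiff) pairs and Correct binary-searches them, which is how offsets into filtered text get mapped back to positions in the original input. A sketch under the assumption that CharReader.Get(TextReader) from this snapshot is available; a real filter would also override Read to actually transform characters:

using System;
using System.IO;
using Mono.Lucene.Net.Analysis;

class OffsetSketchFilter : BaseCharFilter
{
    public OffsetSketchFilter(CharStream input) : base(input)
    {
        // Pretend two characters were removed at offset 5: corrected offsets
        // at or beyond 5 must shift right by a cumulative diff of 2.
        AddOffCorrectMap(5, 2);
    }

    static void Main()
    {
        var f = new OffsetSketchFilter(CharReader.Get(new StringReader("abcdef")));
        Console.WriteLine(f.Correct(4));  // 4  (before the mapped offset)
        Console.WriteLine(f.Correct(5));  // 7  (shifted by the cumulative diff)
    }
}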
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CachingTokenFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CachingTokenFilter.cs
new file mode 100644 (file)
index 0000000..88632c1
--- /dev/null
@@ -0,0 +1,108 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using AttributeSource = Mono.Lucene.Net.Util.AttributeSource;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> This class can be used if the token attributes of a TokenStream
+       /// are intended to be consumed more than once. It caches
+       /// all token attribute states locally in a List.
+       /// 
+       /// <p/>CachingTokenFilter implements the optional method
+       /// {@link TokenStream#Reset()}, which repositions the
+       /// stream to the first Token. 
+       /// </summary>
+       public class CachingTokenFilter:TokenFilter
+       {
+               private System.Collections.IList cache = null;
+               private System.Collections.IEnumerator iterator = null;
+               private AttributeSource.State finalState;
+               
+               public CachingTokenFilter(TokenStream input):base(input)
+               {
+               }
+               
+               /// <deprecated> Will be removed in Lucene 3.0. This method is final, as it should
+               /// not be overridden. Delegates to the backwards compatibility layer. 
+               /// </deprecated>
+        [Obsolete("Will be removed in Lucene 3.0. This method is final, as it should not be overridden. Delegates to the backwards compatibility layer. ")]
+               public override Token Next(Token reusableToken)
+               {
+                       return base.Next(reusableToken);
+               }
+               
+               /// <deprecated> Will be removed in Lucene 3.0. This method is final, as it should
+               /// not be overridden. Delegates to the backwards compatibility layer. 
+               /// </deprecated>
+        [Obsolete("Will be removed in Lucene 3.0. This method is final, as it should not be overridden. Delegates to the backwards compatibility layer. ")]
+               public override Token Next()
+               {
+                       return base.Next();
+               }
+               
+               public override bool IncrementToken()
+               {
+                       if (cache == null)
+                       {
+                               // fill cache lazily
+                               cache = new System.Collections.ArrayList();
+                               FillCache();
+                               iterator = cache.GetEnumerator();
+                       }
+                       
+                       if (!iterator.MoveNext())
+                       {
+                               // the cache is exhausted, return false
+                               return false;
+                       }
+                       // Since the TokenFilter can be reset, the tokens need to be preserved as immutable.
+                       RestoreState((AttributeSource.State) iterator.Current);
+                       return true;
+               }
+               
+               public override void  End()
+               {
+                       if (finalState != null)
+                       {
+                               RestoreState(finalState);
+                       }
+               }
+               
+               public override void  Reset()
+               {
+                       if (cache != null)
+                       {
+                               iterator = cache.GetEnumerator();
+                       }
+               }
+               
+               private void  FillCache()
+               {
+                       while (input.IncrementToken())
+                       {
+                               cache.Add(CaptureState());
+                       }
+                       // capture final state
+                       input.End();
+                       finalState = CaptureState();
+               }
+       }
+}
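Note: the point of the cache is that Reset() replays the captured attribute states instead of re-tokenizing the input. A sketch of a two-pass consumer, assuming this snapshot's WhitespaceTokenizer and TermAttribute types:

using System;
using System.IO;
using Mono.Lucene.Net.Analysis;
using Mono.Lucene.Net.Analysis.Tokenattributes;

class CachingSketch
{
    static void Main()
    {
        TokenStream ts = new CachingTokenFilter(
            new WhitespaceTokenizer(new StringReader("one two three")));
        TermAttribute term = (TermAttribute) ts.GetAttribute(typeof(TermAttribute));

        while (ts.IncrementToken())   // first pass tokenizes and fills the cache
            Console.WriteLine(term.Term());

        ts.Reset();                   // reposition to the first cached state
        while (ts.IncrementToken())   // second pass replays the cache
            Console.WriteLine(term.Term());
    }
}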
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharArraySet.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharArraySet.cs
new file mode 100644 (file)
index 0000000..2c99daa
--- /dev/null
@@ -0,0 +1,472 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       
+       /// <summary> A simple class that stores Strings as char[]'s in a
+       /// hash table.  Note that this is not a general purpose
+       /// class.  For example, it cannot remove items from the
+       /// set, nor does it resize its hash table to be smaller,
+       /// etc.  It is designed to be quick to test if a char[]
+       /// is in the set without the necessity of converting it
+       /// to a String first.
+       /// </summary>
+       
+       public class CharArraySet:System.Collections.Hashtable
+       {
+               public override int Count
+               {
+                       get
+                       {
+                               return count;
+                       }
+                       
+               }
+               private const int INIT_SIZE = 8;
+               private char[][] entries;
+               private int count;
+               private bool ignoreCase;
+               
+               /// <summary>Create set with enough capacity to hold startSize
+               /// terms 
+               /// </summary>
+               public CharArraySet(int startSize, bool ignoreCase)
+               {
+                       this.ignoreCase = ignoreCase;
+                       int size = INIT_SIZE;
+                       while (startSize + (startSize >> 2) > size)
+                               size <<= 1;
+                       entries = new char[size][];
+               }
+               
+               /// <summary>Create set from a Collection of char[] or String </summary>
+               public CharArraySet(System.Collections.ICollection c, bool ignoreCase):this(c.Count, ignoreCase)
+               {
+            System.Collections.IEnumerator e = c is CharArraySet ? ((CharArraySet)c).GetEnumerator() : c.GetEnumerator();
+                       while (e.MoveNext())
+                       {
+                               Add(e.Current);
+                       }
+               }
+               /// <summary>Create set from entries </summary>
+               private CharArraySet(char[][] entries, bool ignoreCase, int count)
+               {
+                       this.entries = entries;
+                       this.ignoreCase = ignoreCase;
+                       this.count = count;
+               }
+               
+               /// <summary>true if the <code>len</code> chars of <code>text</code> starting at <code>off</code>
+               /// are in the set 
+               /// </summary>
+               public virtual bool Contains(char[] text, int off, int len)
+               {
+                       return entries[GetSlot(text, off, len)] != null;
+               }
+               
+               /// <summary>true if the <code>System.String</code> is in the set </summary>
+               public virtual bool Contains(System.String cs)
+               {
+                       return entries[GetSlot(cs)] != null;
+               }
+               
+               private int GetSlot(char[] text, int off, int len)
+               {
+                       int code = GetHashCode(text, off, len);
+                       int pos = code & (entries.Length - 1);
+                       char[] text2 = entries[pos];
+                       if (text2 != null && !Equals(text, off, len, text2))
+                       {
+                               int inc = ((code >> 8) + code) | 1;
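+                               // inc is forced odd; with the power-of-two table size this
+                               // double-hash probe sequence can reach every slot.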
+                               do 
+                               {
+                                       code += inc;
+                                       pos = code & (entries.Length - 1);
+                                       text2 = entries[pos];
+                               }
+                               while (text2 != null && !Equals(text, off, len, text2));
+                       }
+                       return pos;
+               }
+               
+               /// <summary>Returns the hash slot for the given String </summary>
+               private int GetSlot(System.String text)
+               {
+                       int code = GetHashCode(text);
+                       int pos = code & (entries.Length - 1);
+                       char[] text2 = entries[pos];
+                       if (text2 != null && !Equals(text, text2))
+                       {
+                               int inc = ((code >> 8) + code) | 1;
+                               do 
+                               {
+                                       code += inc;
+                                       pos = code & (entries.Length - 1);
+                                       text2 = entries[pos];
+                               }
+                               while (text2 != null && !Equals(text, text2));
+                       }
+                       return pos;
+               }
+               
+               /// <summary>Add this String into the set </summary>
+               public virtual bool Add(System.String text)
+               {
+                       return Add(text.ToCharArray());
+               }
+               
+               /// <summary>Add this char[] directly to the set.
+               /// If ignoreCase is true for this Set, the text array will be directly modified.
+               /// The user should never modify this text array after calling this method.
+               /// </summary>
+               public virtual bool Add(char[] text)
+               {
+                       if (ignoreCase)
+                               for (int i = 0; i < text.Length; i++)
+                                       text[i] = System.Char.ToLower(text[i]);
+                       int slot = GetSlot(text, 0, text.Length);
+                       if (entries[slot] != null)
+                               return false;
+                       entries[slot] = text;
+                       count++;
+                       
+                       if (count + (count >> 2) > entries.Length)
+                       {
+                               Rehash();
+                       }
+                       
+                       return true;
+               }
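+
+               // Usage sketch: with ignoreCase == true the set lower-cases on Add and
+               // on lookup, so:
+               //   CharArraySet set = new CharArraySet(8, true);
+               //   set.Add("Foo");                  // stored as "foo"
+               //   bool hit = set.Contains("FOO");  // true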
+               
+               private bool Equals(char[] text1, int off, int len, char[] text2)
+               {
+                       if (len != text2.Length)
+                               return false;
+                       if (ignoreCase)
+                       {
+                               for (int i = 0; i < len; i++)
+                               {
+                                       if (System.Char.ToLower(text1[off + i]) != text2[i])
+                                               return false;
+                               }
+                       }
+                       else
+                       {
+                               for (int i = 0; i < len; i++)
+                               {
+                                       if (text1[off + i] != text2[i])
+                                               return false;
+                               }
+                       }
+                       return true;
+               }
+               
+               private bool Equals(System.String text1, char[] text2)
+               {
+                       int len = text1.Length;
+                       if (len != text2.Length)
+                               return false;
+                       if (ignoreCase)
+                       {
+                               for (int i = 0; i < len; i++)
+                               {
+                                       if (System.Char.ToLower(text1[i]) != text2[i])
+                                               return false;
+                               }
+                       }
+                       else
+                       {
+                               for (int i = 0; i < len; i++)
+                               {
+                                       if (text1[i] != text2[i])
+                                               return false;
+                               }
+                       }
+                       return true;
+               }
+               
+               private void  Rehash()
+               {
+                       int newSize = 2 * entries.Length;
+                       char[][] oldEntries = entries;
+                       entries = new char[newSize][];
+                       
+                       for (int i = 0; i < oldEntries.Length; i++)
+                       {
+                               char[] text = oldEntries[i];
+                               if (text != null)
+                               {
+                                       // todo: could be faster... no need to compare strings on collision
+                                       entries[GetSlot(text, 0, text.Length)] = text;
+                               }
+                       }
+               }
+
+        private int GetHashCode(char[] text, int offset, int len)
+               {
+                       int code = 0;
+                       int stop = offset + len;
+                       if (ignoreCase)
+                       {
+                               for (int i = offset; i < stop; i++)
+                               {
+                                       code = code * 31 + System.Char.ToLower(text[i]);
+                               }
+                       }
+                       else
+                       {
+                               for (int i = offset; i < stop; i++)
+                               {
+                                       code = code * 31 + text[i];
+                               }
+                       }
+                       return code;
+               }
+               
+               private int GetHashCode(System.String text)
+               {
+                       int code = 0;
+                       int len = text.Length;
+                       if (ignoreCase)
+                       {
+                               for (int i = 0; i < len; i++)
+                               {
+                                       code = code * 31 + System.Char.ToLower(text[i]);
+                               }
+                       }
+                       else
+                       {
+                               for (int i = 0; i < len; i++)
+                               {
+                                       code = code * 31 + text[i];
+                               }
+                       }
+                       return code;
+               }
+               
+               public virtual int Size()
+               {
+                       return count;
+               }
+               
+               public virtual bool IsEmpty()
+               {
+                       return count == 0;
+               }
+               
+               public override bool Contains(System.Object o)
+               {
+                       if (o is char[])
+                       {
+                               char[] text = (char[]) o;
+                               return Contains(text, 0, text.Length);
+                       }
+                       return Contains(o.ToString());
+               }
+
+        //LUCENENET-414 (https://issues.apache.org/jira/browse/LUCENENET-414)
+        public virtual bool Add(object key, object value)
+        {
+            return Add(key);
+        }
+
+               public virtual bool Add(System.Object o)
+               {
+                       if (o is char[])
+                       {
+                               return Add((char[]) o);
+                       }
+
+            if (o is System.Collections.Hashtable)
+            {
+                foreach (string word in ((System.Collections.Hashtable)o).Keys)
+                {
+                    Add(word);
+                }
+                return true;
+            }
+
+                       return Add(o.ToString());
+               }
+               
+               /// <summary> Returns an unmodifiable {@link CharArraySet}. This allows providing
+               /// unmodifiable views of internal sets for "read-only" use.
+               /// 
+               /// </summary>
+               /// <param name="set">a set for which the unmodifiable set is returned.
+               /// </param>
+               /// <returns> a new unmodifiable {@link CharArraySet}.
+               /// </returns>
+               /// <throws>  NullPointerException if the given set is <code>null</code>. </throws>
+               public static CharArraySet UnmodifiableSet(CharArraySet set_Renamed)
+               {
+                       if (set_Renamed == null)
+                               throw new System.NullReferenceException("Given set is null");
+                       /*
+                       * Instead of delegating calls to the given set copy the low-level values to
+                       * the unmodifiable Subclass
+                       */
+                       return new UnmodifiableCharArraySet(set_Renamed.entries, set_Renamed.ignoreCase, set_Renamed.count);
+               }
+
+        /// <summary>The Iterator&lt;String&gt; for this set.  Strings are constructed on the fly, so
+               /// use <code>nextCharArray</code> for more efficient access. 
+               /// </summary>
+               public class CharArraySetIterator : System.Collections.IEnumerator
+               {
+                       private void  InitBlock(CharArraySet enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private CharArraySet enclosingInstance;
+            /// <summary>Returns the next String, as a Set&lt;String&gt; would...
+                       /// use nextCharArray() for better efficiency. 
+                       /// </summary>
+                       public virtual System.Object Current
+                       {
+                               get
+                               {
+                                       return new System.String(NextCharArray());
+                               }
+                               
+                       }
+                       public CharArraySet Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal int pos = - 1;
+                       internal char[] next_Renamed_Field;
+                       internal CharArraySetIterator(CharArraySet enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                               GoNext();
+                       }
+                       
+                       private void  GoNext()
+                       {
+                               next_Renamed_Field = null;
+                               pos++;
+                               while (pos < Enclosing_Instance.entries.Length && (next_Renamed_Field = Enclosing_Instance.entries[pos]) == null)
+                                       pos++;
+                       }
+                       
+                       public virtual bool MoveNext()
+                       {
+                               return next_Renamed_Field != null;
+                       }
+                       
+                       /// <summary>do not modify the returned char[] </summary>
+                       public virtual char[] NextCharArray()
+                       {
+                               char[] ret = next_Renamed_Field;
+                               GoNext();
+                               return ret;
+                       }
+                       
+                       public virtual void  Remove()
+                       {
+                               throw new System.NotSupportedException();
+                       }
+                       
+                       virtual public void  Reset()
+                       {
+                               // Port issue: Reset() was never implemented; throw instead of a
+                               // Debug.Fail that would silently no-op in release builds.  // {{Aroush-2.9
+                               throw new System.NotSupportedException("CharArraySetIterator.Reset() is not implemented in this port");
+                       }
+               }
+               
+               
+               public new System.Collections.IEnumerator GetEnumerator()
+               {
+                       return new CharArraySetIterator(this);
+               }
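+
+               // Iteration sketch: this port's enumerator is unusual in that Current
+               // advances the cursor (it calls NextCharArray() internally), so consume
+               // the set via MoveNext()/NextCharArray() pairs ("set" stands in for any
+               // CharArraySet instance):
+               //
+               //   CharArraySetIterator it = (CharArraySetIterator) set.GetEnumerator();
+               //   while (it.MoveNext())
+               //       System.Console.WriteLine(new System.String(it.NextCharArray()));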
+               
+               /// <summary> Efficient unmodifiable {@link CharArraySet}. This implementation does not
+               /// delegate calls to a given {@link CharArraySet} like
+               /// {@link Collections#UnmodifiableSet(java.util.Set)} does. Instead it passes
+               /// the internal representation of a {@link CharArraySet} to a super
+               /// constructor and overrides all mutators. 
+               /// </summary>
+               private sealed class UnmodifiableCharArraySet:CharArraySet
+               {
+                       
+                       internal UnmodifiableCharArraySet(char[][] entries, bool ignoreCase, int count):base(entries, ignoreCase, count)
+                       {
+                       }
+                       
+                       public override bool Add(System.Object o)
+                       {
+                               throw new System.NotSupportedException();
+                       }
+                       
+                       public override bool AddAll(System.Collections.ICollection coll)
+                       {
+                               throw new System.NotSupportedException();
+                       }
+                       
+                       public override bool Add(char[] text)
+                       {
+                               throw new System.NotSupportedException();
+                       }
+                       
+                       public override bool Add(System.String text)
+                       {
+                               throw new System.NotSupportedException();
+                       }
+               }
+
+        /// <summary>Adds all of the elements in the specified collection to this collection </summary>
+        public virtual bool AddAll(System.Collections.ICollection items)
+        {
+            bool added = false;
+            System.Collections.IEnumerator iter = items.GetEnumerator();
+            while (iter.MoveNext())
+            {
+                // Accumulate with |= so one successful Add is not masked by a later no-op.
+                added |= Add(iter.Current);
+            }
+            return added;
+        }
+
+        /// <summary>Removes all elements from the set. Not supported by this implementation; always throws. </summary>
+        public virtual new bool Clear()
+        {
+            throw new System.NotSupportedException();
+        }
+
+        /// <summary>Removes from this set all of its elements that are contained in the specified collection.
+        /// Not supported by this implementation; always throws. </summary>
+        public virtual bool RemoveAll(System.Collections.ICollection items)
+        {
+            throw new System.NotSupportedException();
+        }
+
+        /// <summary>Retains only the elements in this set that are contained in the specified collection.
+        /// Not supported by this implementation; always throws. </summary>
+        public bool RetainAll(System.Collections.ICollection coll)
+        {
+            throw new System.NotSupportedException();
+        }
+    }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharFilter.cs
new file mode 100644 (file)
index 0000000..f6413b8
--- /dev/null
@@ -0,0 +1,89 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> Subclasses of CharFilter can be chained to filter a CharStream.
+       /// They can be used as a {@link java.io.Reader} with additional offset
+       /// correction. {@link Tokenizer}s will automatically use {@link #CorrectOffset}
+       /// if a CharFilter/CharStream subclass is used.
+       /// 
+       /// </summary>
+       /// <version>  $Id$
+       /// 
+       /// </version>
+       public abstract class CharFilter:CharStream
+       {
+        private long currentPosition = -1;
+               
+               protected internal CharStream input;
+               
+               protected internal CharFilter(CharStream in_Renamed) : base(in_Renamed)
+               {
+                       input = in_Renamed;
+               }
+               
+               /// <summary> Subclass may want to override to correct the current offset.
+               /// 
+               /// </summary>
+               /// <param name="currentOff">current offset
+               /// </param>
+               /// <returns> corrected offset
+               /// </returns>
+               public /*protected internal*/ virtual int Correct(int currentOff)
+               {
+                       return currentOff;
+               }
+               
+               /// <summary> Chains the corrected offset through the input
+               /// CharFilter.
+               /// </summary>
+               public override int CorrectOffset(int currentOff)
+               {
+                       return input.CorrectOffset(Correct(currentOff));
+               }
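+               
+               // Offset-correction sketch (hypothetical subclass, not part of these
+               // sources): a filter that strips one leading character shifts offsets
+               // by +1 so token offsets still point into the original input.
+               //
+               //   class SkipFirstCharFilter : CharFilter {
+               //       public SkipFirstCharFilter(CharStream input) : base(input) { input.Read(); }
+               //       public override int Correct(int currentOff) { return currentOff + 1; }
+               //   }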
+               
+               public override void  Close()
+               {
+                       input.Close();
+               }
+               
+               public  override int Read(System.Char[] cbuf, int off, int len)
+               {
+                       return input.Read(cbuf, off, len);
+               }
+               
+               public bool MarkSupported()
+               {
+            return input.BaseStream.CanSeek;
+               }
+               
+               public void  Mark(int readAheadLimit)
+               {
+            // Record the current position so Reset() can seek back to it; the Java
+            // readAheadLimit argument is not needed for a seekable stream.
+            currentPosition = input.BaseStream.Position;
+               }
+               
+               public void  Reset()
+               {
+                       input.BaseStream.Position = currentPosition;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharReader.cs
new file mode 100644 (file)
index 0000000..66b3262
--- /dev/null
@@ -0,0 +1,83 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> CharReader is a Reader wrapper. It reads chars from
+       /// Reader and outputs {@link CharStream}, defining an
+       /// identity function {@link #CorrectOffset} method that
+       /// simply returns the provided offset.
+       /// </summary>
+       public sealed class CharReader:CharStream
+       {
+        private long currentPosition = -1;
+               
+               internal System.IO.StreamReader input;
+               
+               public static CharStream Get(System.IO.TextReader input)
+               {
+            if (input is CharStream)
+                return (CharStream) input;
+            else
+            {
+                // {{Aroush-2.9}} isn't there a better (faster) way to do this?
+                System.IO.MemoryStream theString = new System.IO.MemoryStream(System.Text.Encoding.UTF8.GetBytes(input.ReadToEnd()));
+                return new CharReader(new System.IO.StreamReader(theString));
+            }
+                       //return input is CharStream?(CharStream) input:new CharReader(input);
+               }
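+               
+               // Usage sketch: Get() is the factory entry point; an existing CharStream
+               // passes through untouched, while any other TextReader is buffered into one.
+               //
+               //   CharStream cs = CharReader.Get(new System.IO.StringReader("some text"));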
+               
+               private CharReader(System.IO.StreamReader in_Renamed) : base(in_Renamed)
+               {
+                       input = in_Renamed;
+               }
+               
+               public override int CorrectOffset(int currentOff)
+               {
+                       return currentOff;
+               }
+               
+               public override void  Close()
+               {
+                       input.Close();
+               }
+               
+               public  override int Read(System.Char[] cbuf, int off, int len)
+               {
+                       return input.Read(cbuf, off, len);
+               }
+               
+               public bool MarkSupported()
+               {
+                       return input.BaseStream.CanSeek;
+               }
+               
+               public void  Mark(int readAheadLimit)
+               {
+                       // Record the current position so Reset() can seek back to it; the
+                       // Java readAheadLimit argument is not needed for a seekable stream.
+                       currentPosition = input.BaseStream.Position;
+               }
+               
+               public void  Reset()
+               {
+                       input.BaseStream.Position = currentPosition;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharStream.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharStream.cs
new file mode 100644 (file)
index 0000000..0043c63
--- /dev/null
@@ -0,0 +1,47 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> CharStream adds {@link #CorrectOffset}
+       /// functionality over {@link Reader}.  All Tokenizers accept a
+       /// CharStream instead of {@link Reader} as input, which enables
+       /// arbitrary character based filtering before tokenization. 
+       /// The {@link #CorrectOffset} method fixes offsets to account for
+       /// removal or insertion of characters, so that the offsets
+       /// reported in the tokens match the character offsets of the
+       /// original Reader.
+       /// </summary>
+       public abstract class CharStream:System.IO.StreamReader
+       {
+        protected CharStream(System.IO.StreamReader reader) : base(reader.BaseStream)
+        {
+        }
+               
+               /// <summary> Called by CharFilter(s) and Tokenizer to correct token offset.
+               /// 
+               /// </summary>
+               /// <param name="currentOff">offset as seen in the output
+               /// </param>
+               /// <returns> corrected offset based on the input
+               /// </returns>
+               public abstract int CorrectOffset(int currentOff);
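+               
+               // Illustrative contract: if a CharFilter earlier in the chain removed two
+               // characters before position 5, CorrectOffset(5) should return 7 so the
+               // offset maps back into the original, unfiltered reader.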
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharTokenizer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharTokenizer.cs
new file mode 100644 (file)
index 0000000..dbe5ccd
--- /dev/null
@@ -0,0 +1,157 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using OffsetAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.OffsetAttribute;
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+using AttributeSource = Mono.Lucene.Net.Util.AttributeSource;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary>An abstract base class for simple, character-oriented tokenizers.</summary>
+       public abstract class CharTokenizer:Tokenizer
+       {
+               public CharTokenizer(System.IO.TextReader input):base(input)
+               {
+                       offsetAtt = (OffsetAttribute) AddAttribute(typeof(OffsetAttribute));
+                       termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
+               }
+               
+               public CharTokenizer(AttributeSource source, System.IO.TextReader input):base(source, input)
+               {
+                       offsetAtt = (OffsetAttribute) AddAttribute(typeof(OffsetAttribute));
+                       termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
+               }
+               
+               public CharTokenizer(AttributeFactory factory, System.IO.TextReader input):base(factory, input)
+               {
+                       offsetAtt = (OffsetAttribute) AddAttribute(typeof(OffsetAttribute));
+                       termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
+               }
+               
+               private int offset = 0, bufferIndex = 0, dataLen = 0;
+               private const int MAX_WORD_LEN = 255;
+               private const int IO_BUFFER_SIZE = 4096;
+               private char[] ioBuffer = new char[IO_BUFFER_SIZE];
+               
+               private TermAttribute termAtt;
+               private OffsetAttribute offsetAtt;
+               
+               /// <summary>Returns true iff a character should be included in a token.  This
+               /// tokenizer generates as tokens adjacent sequences of characters which
+               /// satisfy this predicate.  Characters for which this is false are used to
+               /// define token boundaries and are not included in tokens. 
+               /// </summary>
+               protected internal abstract bool IsTokenChar(char c);
+               
+               /// <summary>Called on each token character to normalize it before it is added to the
+               /// token.  The default implementation does nothing.  Subclasses may use this
+               /// to, e.g., lowercase tokens. 
+               /// </summary>
+               protected internal virtual char Normalize(char c)
+               {
+                       return c;
+               }
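+               
+               // Sketch of a minimal subclass (hypothetical; it mirrors what the real
+               // LetterTokenizer and LowerCaseTokenizer classes in these sources do):
+               //
+               //   class LowerLetterTokenizer : CharTokenizer {
+               //       public LowerLetterTokenizer(System.IO.TextReader r) : base(r) { }
+               //       protected internal override bool IsTokenChar(char c) { return System.Char.IsLetter(c); }
+               //       protected internal override char Normalize(char c) { return System.Char.ToLower(c); }
+               //   }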
+               
+               public override bool IncrementToken()
+               {
+                       ClearAttributes();
+                       int length = 0;
+                       int start = bufferIndex;
+                       char[] buffer = termAtt.TermBuffer();
+                       while (true)
+                       {
+                               
+                               if (bufferIndex >= dataLen)
+                               {
+                                       offset += dataLen;
+                                       dataLen = input.Read((System.Char[]) ioBuffer, 0, ioBuffer.Length);
+                                       if (dataLen <= 0)
+                                       {
+                                               dataLen = 0; // so next offset += dataLen won't decrement offset
+                                               if (length > 0)
+                                                       break;
+                                               else
+                                                       return false;
+                                       }
+                                       bufferIndex = 0;
+                               }
+                               
+                               char c = ioBuffer[bufferIndex++];
+                               
+                               if (IsTokenChar(c))
+                               {
+                                       // if it's a token char
+                                       
+                                       if (length == 0)
+                                       // start of token
+                                               start = offset + bufferIndex - 1;
+                                       else if (length == buffer.Length)
+                                               buffer = termAtt.ResizeTermBuffer(1 + length);
+                                       
+                                       buffer[length++] = Normalize(c); // buffer it, normalized
+                                       
+                                       if (length == MAX_WORD_LEN)
+                                       // buffer overflow!
+                                               break;
+                               }
+                               else if (length > 0)
+                               // at non-Letter w/ chars
+                                       break; // return 'em
+                       }
+                       
+                       termAtt.SetTermLength(length);
+                       offsetAtt.SetOffset(CorrectOffset(start), CorrectOffset(start + length));
+                       return true;
+               }
+               
+               public override void  End()
+               {
+                       // set final offset
+                       int finalOffset = CorrectOffset(offset);
+                       offsetAtt.SetOffset(finalOffset, finalOffset);
+               }
+               
+               /// <deprecated> Will be removed in Lucene 3.0. This method is final, as it should
+               /// not be overridden. Delegates to the backwards compatibility layer. 
+               /// </deprecated>
+        [Obsolete("Will be removed in Lucene 3.0. This method is final, as it should not be overridden. Delegates to the backwards compatibility layer. ")]
+               public override Token Next(Token reusableToken)
+               {
+                       return base.Next(reusableToken);
+               }
+               
+               /// <deprecated> Will be removed in Lucene 3.0. This method is final, as it should
+               /// not be overridden. Delegates to the backwards compatibility layer. 
+               /// </deprecated>
+        [Obsolete("Will be removed in Lucene 3.0. This method is final, as it should not be overridden. Delegates to the backwards compatibility layer. ")]
+               public override Token Next()
+               {
+                       return base.Next();
+               }
+               
+               public override void  Reset(System.IO.TextReader input)
+               {
+                       base.Reset(input);
+                       bufferIndex = 0;
+                       offset = 0;
+                       dataLen = 0;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharacterCache.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/CharacterCache.cs
new file mode 100644 (file)
index 0000000..2dab794
--- /dev/null
@@ -0,0 +1,57 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> Replacement for Java 1.5 Character.valueOf()</summary>
+       /// <deprecated> Move to Character.valueOf() in 3.0
+       /// </deprecated>
+    [Obsolete("Move to Character.valueOf() in 3.0")]
+       public class CharacterCache
+       {
+               
+               private static readonly System.Char[] cache = new System.Char[128];
+               
+               /// <summary> Returns a Character instance representing the given char value
+               /// 
+               /// </summary>
+               /// <param name="c">a char value
+               /// </param>
+               /// <returns> a Character representation of the given char value.
+               /// </returns>
+               public static System.Char ValueOf(char c)
+               {
+                       if (c < cache.Length)
+                       {
+                               return cache[(int) c];
+                       }
+                       return c;
+               }
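+               
+               // Note: System.Char is a value type in .NET, so unlike Java's
+               // Character.valueOf() there is no boxing to avoid here; ValueOf(c)
+               // simply returns a char equal to c, and the cache is a port artifact.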
+               static CharacterCache()
+               {
+                       {
+                               for (int i = 0; i < cache.Length; i++)
+                               {
+                                       cache[i] = (char) i;
+                               }
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/ISOLatin1AccentFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/ISOLatin1AccentFilter.cs
new file mode 100644 (file)
index 0000000..bad5d91
--- /dev/null
@@ -0,0 +1,362 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> A filter that replaces accented characters in the ISO Latin 1 character set 
+       /// (ISO-8859-1) by their unaccented equivalent. The case will not be altered.
+       /// <p/>
+       /// For instance, '&#224;' will be replaced by 'a'.
+       /// <p/>
+       /// 
+       /// </summary>
+       /// <deprecated> in favor of {@link ASCIIFoldingFilter} which covers a superset 
+       /// of Latin 1. This class will be removed in Lucene 3.0.
+       /// </deprecated>
+    [Obsolete("in favor of ASCIIFoldingFilter which covers a superset of Latin 1. This class will be removed in Lucene 3.0.")]
+       public class ISOLatin1AccentFilter:TokenFilter
+       {
+               public ISOLatin1AccentFilter(TokenStream input):base(input)
+               {
+                       termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
+               }
+               
+               private char[] output = new char[256];
+               private int outputPos;
+               private TermAttribute termAtt;
+               
+               public override bool IncrementToken()
+               {
+                       if (input.IncrementToken())
+                       {
+                               char[] buffer = termAtt.TermBuffer();
+                               int length = termAtt.TermLength();
+                               // If no characters actually require rewriting then we
+                               // just return token as-is:
+                               for (int i = 0; i < length; i++)
+                               {
+                                       char c = buffer[i];
+                                       if (c >= '\u00c0' && c <= '\uFB06')
+                                       {
+                                               RemoveAccents(buffer, length);
+                                               termAtt.SetTermBuffer(output, 0, outputPos);
+                                               break;
+                                       }
+                               }
+                               return true;
+                       }
+                       else
+                               return false;
+               }
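+               
+               // Usage sketch ("someTokenStream" stands in for any TokenStream produced
+               // elsewhere, e.g. by an Analyzer):
+               //
+               //   TokenStream ts = new ISOLatin1AccentFilter(someTokenStream);
+               //   while (ts.IncrementToken())
+               //       ; // term buffers now hold the unaccented equivalents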
+               
+               /// <deprecated> Will be removed in Lucene 3.0. This method is final, as it should
+               /// not be overridden. Delegates to the backwards compatibility layer. 
+               /// </deprecated>
+        [Obsolete("Will be removed in Lucene 3.0. This method is final, as it should not be overridden. Delegates to the backwards compatibility layer. ")]
+               public override Token Next(Token reusableToken)
+               {
+                       return base.Next(reusableToken);
+               }
+               
+               /// <deprecated> Will be removed in Lucene 3.0. This method is final, as it should
+               /// not be overridden. Delegates to the backwards compatibility layer. 
+               /// </deprecated>
+        [Obsolete("Will be removed in Lucene 3.0. This method is final, as it should not be overridden. Delegates to the backwards compatibility layer. ")]
+               public override Token Next()
+               {
+                       return base.Next();
+               }
+               
+               /// <summary> To replace accented characters in a String by unaccented equivalents.</summary>
+               public void  RemoveAccents(char[] input, int length)
+               {
+                       
+                       // Worst-case length required:
+                       int maxSizeNeeded = 2 * length;
+                       
+                       int size = output.Length;
+                       while (size < maxSizeNeeded)
+                               size *= 2;
+                       
+                       if (size != output.Length)
+                               output = new char[size];
+                       
+                       outputPos = 0;
+                       
+                       int pos = 0;
+                       
+                       for (int i = 0; i < length; i++, pos++)
+                       {
+                               char c = input[pos];
+                               
+                               // Quick test: if it's not in range then just keep
+                               // current character
+                               if (c < '\u00c0' || c > '\uFB06')
+                                       output[outputPos++] = c;
+                               else
+                               {
+                                       switch (c)
+                                       {
+                                               
+                                               case '\u00C0':  // À
+                                               case '\u00C1':  // Á
+                                               case '\u00C2':  // Â
+                                               case '\u00C3':  // Ã
+                                               case '\u00C4':  // Ä
+                                               case '\u00C5':  // Å
+                                                       output[outputPos++] = 'A';
+                                                       break;
+                                               
+                                               case '\u00C6':  // Æ
+                                                       output[outputPos++] = 'A';
+                                                       output[outputPos++] = 'E';
+                                                       break;
+                                               
+                                               case '\u00C7':  // Ç
+                                                       output[outputPos++] = 'C';
+                                                       break;
+                                               
+                                               case '\u00C8':  // È
+                                               case '\u00C9':  // É
+                                               case '\u00CA':  // Ê
+                                               case '\u00CB':  // Ë
+                                                       output[outputPos++] = 'E';
+                                                       break;
+                                               
+                                               case '\u00CC':  // Ì
+                                               case '\u00CD':  // Í
+                                               case '\u00CE':  // Î
+                                               case '\u00CF':  // Ï
+                                                       output[outputPos++] = 'I';
+                                                       break;
+                                               
+                                               case '\u0132':  // Ĳ
+                                                       output[outputPos++] = 'I';
+                                                       output[outputPos++] = 'J';
+                                                       break;
+                                               
+                                               case '\u00D0':  // Ð
+                                                       output[outputPos++] = 'D';
+                                                       break;
+                                               
+                                               case '\u00D1':  // Ñ
+                                                       output[outputPos++] = 'N';
+                                                       break;
+                                               
+                                               case '\u00D2':  // Ò
+                                               case '\u00D3':  // Ó
+                                               case '\u00D4':  // Ô
+                                               case '\u00D5':  // Õ
+                                               case '\u00D6':  // Ö
+                                               case '\u00D8':  // Ø
+                                                       output[outputPos++] = 'O';
+                                                       break;
+                                               
+                                               case '\u0152':  // Œ
+                                                       output[outputPos++] = 'O';
+                                                       output[outputPos++] = 'E';
+                                                       break;
+                                               
+                                               case '\u00DE':  // Þ
+                                                       output[outputPos++] = 'T';
+                                                       output[outputPos++] = 'H';
+                                                       break;
+                                               
+                                               case '\u00D9':  // Ù
+                                               case '\u00DA':  // Ú
+                                               case '\u00DB':  // Û
+                                               case '\u00DC':  // Ü
+                                                       output[outputPos++] = 'U';
+                                                       break;
+                                               
+                                               case '\u00DD':  // Ý
+                                               case '\u0178':  // Ÿ
+                                                       output[outputPos++] = 'Y';
+                                                       break;
+                                               
+                                               case '\u00E0':  // à
+                                               case '\u00E1':  // á
+                                               case '\u00E2':  // â
+                                               case '\u00E3':  // ã
+                                               case '\u00E4':  // ä
+                                               case '\u00E5':  // å
+                                                       output[outputPos++] = 'a';
+                                                       break;
+                                               
+                                               case '\u00E6':  // æ
+                                                       output[outputPos++] = 'a';
+                                                       output[outputPos++] = 'e';
+                                                       break;
+                                               
+                                               case '\u00E7':  // ç
+                                                       output[outputPos++] = 'c';
+                                                       break;
+                                               
+                                               case '\u00E8':  // è
+                                               case '\u00E9':  // é
+                                               case '\u00EA':  // ê
+                                               case '\u00EB':  // ë
+                                                       output[outputPos++] = 'e';
+                                                       break;
+                                               
+                                               case '\u00EC':  // ì
+                                               case '\u00ED':  // í
+                                               case '\u00EE':  // î
+                                               case '\u00EF':  // ï
+                                                       output[outputPos++] = 'i';
+                                                       break;
+                                               
+                                               case '\u0133':  // ĳ
+                                                       output[outputPos++] = 'i';
+                                                       output[outputPos++] = 'j';
+                                                       break;
+                                               
+                                               case '\u00F0':  // ð
+                                                       output[outputPos++] = 'd';
+                                                       break;
+                                               
+                                               case '\u00F1':  // ñ
+                                                       output[outputPos++] = 'n';
+                                                       break;
+                                               
+                                               case '\u00F2':  // ò
+                                               case '\u00F3':  // ó
+                                               case '\u00F4':  // ô
+                                               case '\u00F5':  // õ
+                                               case '\u00F6':  // ö
+                                               case '\u00F8':  // ø
+                                                       output[outputPos++] = 'o';
+                                                       break;
+                                               
+                                               case '\u0153':  // œ
+                                                       output[outputPos++] = 'o';
+                                                       output[outputPos++] = 'e';
+                                                       break;
+                                               
+                                               case '\u00DF':  // ß
+                                                       output[outputPos++] = 's';
+                                                       output[outputPos++] = 's';
+                                                       break;
+                                               
+                                               case '\u00FE':  // þ
+                                                       output[outputPos++] = 't';
+                                                       output[outputPos++] = 'h';
+                                                       break;
+                                               
+                                               case '\u00F9':  // ù
+                                               case '\u00FA':  // ú
+                                               case '\u00FB':  // û
+                                               case '\u00FC':  // ü
+                                                       output[outputPos++] = 'u';
+                                                       break;
+                                               
+                                               case '\u00FD':  // ý
+                                               case '\u00FF':  // ÿ
+                                                       output[outputPos++] = 'y';
+                                                       break;
+                                               
+                                               case '\uFB00':  // ﬀ
+                                                       output[outputPos++] = 'f';
+                                                       output[outputPos++] = 'f';
+                                                       break;
+                                               
+                                               case '\uFB01':  // ﬁ
+                                                       output[outputPos++] = 'f';
+                                                       output[outputPos++] = 'i';
+                                                       break;
+                                               
+                                               case '\uFB02':  // ﬂ
+                                                       output[outputPos++] = 'f';
+                                                       output[outputPos++] = 'l';
+                                                       break;
+                                                       // following 2 are commented as they can break the maxSizeNeeded (and doing *3 could be expensive)
+                                                       //        case '\uFB03': // ﬃ
+                                                       //            output[outputPos++] = 'f';
+                                                       //            output[outputPos++] = 'f';
+                                                       //            output[outputPos++] = 'i';
+                                                       //            break;
+                                                       //        case '\uFB04': // ﬄ
+                                                       //            output[outputPos++] = 'f';
+                                                       //            output[outputPos++] = 'f';
+                                                       //            output[outputPos++] = 'l';
+                                                       //            break;
+                                               
+                                               case '\uFB05':  // ﬅ
+                                                       output[outputPos++] = 'f';
+                                                       output[outputPos++] = 't';
+                                                       break;
+                                               
+                                               case '\uFB06':  // ﬆ
+                                                       output[outputPos++] = 's';
+                                                       output[outputPos++] = 't';
+                                                       break;
+                                               
+                                               default: 
+                                                       output[outputPos++] = c;
+                                                       break;
+                                               
+                                       }
+                               }
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/KeywordAnalyzer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/KeywordAnalyzer.cs
new file mode 100644 (file)
index 0000000..0139232
--- /dev/null
@@ -0,0 +1,56 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> "Tokenizes" the entire stream as a single token. This is useful
+       /// for data like zip codes, ids, and some product names.
+       /// </summary>
+       public class KeywordAnalyzer:Analyzer
+       {
+               public KeywordAnalyzer()
+               {
+                       SetOverridesTokenStreamMethod(typeof(KeywordAnalyzer));
+               }
+               public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
+               {
+                       return new KeywordTokenizer(reader);
+               }
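+               // Behavior sketch: analyzing a field with KeywordAnalyzer yields exactly
+               // one token containing the whole input, so a hypothetical id such as
+               // "HX-1337 Rev B" survives intact instead of being split on whitespace.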
+               public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
+               {
+                       if (overridesTokenStreamMethod)
+                       {
+                               // LUCENE-1678: force fallback to tokenStream() if we
+                               // have been subclassed and that subclass overrides
+                               // tokenStream but not reusableTokenStream
+                               return TokenStream(fieldName, reader);
+                       }
+                       Tokenizer tokenizer = (Tokenizer) GetPreviousTokenStream();
+                       if (tokenizer == null)
+                       {
+                               tokenizer = new KeywordTokenizer(reader);
+                               SetPreviousTokenStream(tokenizer);
+                       }
+                       else
+                               tokenizer.Reset(reader);
+                       return tokenizer;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/KeywordTokenizer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/KeywordTokenizer.cs
new file mode 100644 (file)
index 0000000..8e9434d
--- /dev/null
@@ -0,0 +1,120 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using OffsetAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.OffsetAttribute;
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+using AttributeSource = Mono.Lucene.Net.Util.AttributeSource;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> Emits the entire input as a single token.</summary>
+       public class KeywordTokenizer:Tokenizer
+       {
+               
+               private const int DEFAULT_BUFFER_SIZE = 256;
+               
+               private bool done;
+               private int finalOffset;
+               private TermAttribute termAtt;
+               private OffsetAttribute offsetAtt;
+               
+               public KeywordTokenizer(System.IO.TextReader input):this(input, DEFAULT_BUFFER_SIZE)
+               {
+               }
+               
+               public KeywordTokenizer(System.IO.TextReader input, int bufferSize):base(input)
+               {
+                       Init(bufferSize);
+               }
+               
+               public KeywordTokenizer(AttributeSource source, System.IO.TextReader input, int bufferSize):base(source, input)
+               {
+                       Init(bufferSize);
+               }
+               
+               public KeywordTokenizer(AttributeFactory factory, System.IO.TextReader input, int bufferSize):base(factory, input)
+               {
+                       Init(bufferSize);
+               }
+               
+               private void  Init(int bufferSize)
+               {
+                       this.done = false;
+                       termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
+                       offsetAtt = (OffsetAttribute) AddAttribute(typeof(OffsetAttribute));
+                       termAtt.ResizeTermBuffer(bufferSize);
+               }
+               
+               public override bool IncrementToken()
+               {
+                       if (!done)
+                       {
+                               ClearAttributes();
+                               done = true;
+                               int upto = 0;
+                               char[] buffer = termAtt.TermBuffer();
+                               while (true)
+                               {
+                                       int length = input.Read(buffer, upto, buffer.Length - upto);
+                                       if (length == 0)
+                                               break;
+                                       upto += length;
+                                       if (upto == buffer.Length)
+                                               buffer = termAtt.ResizeTermBuffer(1 + buffer.Length);
+                               }
+                               termAtt.SetTermLength(upto);
+                               finalOffset = CorrectOffset(upto);
+                               offsetAtt.SetOffset(CorrectOffset(0), finalOffset);
+                               return true;
+                       }
+                       return false;
+               }
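+               
+               // Behavior sketch: the first IncrementToken() call reads the entire input
+               // into the term buffer (growing it as needed) and returns true; every
+               // subsequent call returns false until Reset() is invoked.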
+               
+               public override void  End()
+               {
+                       // set final offset 
+                       offsetAtt.SetOffset(finalOffset, finalOffset);
+               }
+               
+               /// <deprecated> Will be removed in Lucene 3.0. This method is final, as it should
+               /// not be overridden. Delegates to the backwards compatibility layer. 
+               /// </deprecated>
+        [Obsolete("Will be removed in Lucene 3.0. This method is final, as it should not be overridden. Delegates to the backwards compatibility layer. ")]
+               public override Token Next(Token reusableToken)
+               {
+                       return base.Next(reusableToken);
+               }
+               
+               /// <deprecated> Will be removed in Lucene 3.0. This method is final, as it should
+               /// not be overridden. Delegates to the backwards compatibility layer. 
+               /// </deprecated>
+        [Obsolete("Will be removed in Lucene 3.0. This method is final, as it should not be overridden. Delegates to the backwards compatibility layer. ")]
+               public override Token Next()
+               {
+                       return base.Next();
+               }
+               
+               public override void  Reset(System.IO.TextReader input)
+               {
+                       base.Reset(input);
+                       this.done = false;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/LengthFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/LengthFilter.cs
new file mode 100644 (file)
index 0000000..f27ded7
--- /dev/null
@@ -0,0 +1,66 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> Removes words that are too long or too short from the stream.
+       /// 
+       /// 
+       /// </summary>
+       /// <version>  $Id: LengthFilter.java 807201 2009-08-24 13:22:34Z markrmiller $
+       /// </version>
+       public sealed class LengthFilter:TokenFilter
+       {
+               
+               internal int min;
+               internal int max;
+               
+               private TermAttribute termAtt;
+               
+               /// <summary> Build a filter that removes words that are too long or too
+               /// short from the text.
+               /// </summary>
+               public LengthFilter(TokenStream in_Renamed, int min, int max):base(in_Renamed)
+               {
+                       this.min = min;
+                       this.max = max;
+                       termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
+               }
+               
+               /// <summary> Returns the next input Token whose term() is the right length.</summary>
+               public override bool IncrementToken()
+               {
+                       // return the first token whose term length is within bounds
+                       while (input.IncrementToken())
+                       {
+                               int len = termAtt.TermLength();
+                               if (len >= min && len <= max)
+                               {
+                                       return true;
+                               }
+                               // note: else we ignore it but should we index each part of it?
+                       }
+                       // reached end of stream -- return false
+                       return false;
+               }
+       }
+}
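A minimal usage sketch for the filter above, assuming the same usings as the consumer sketch earlier; the tokenizer, input text, and bounds are illustrative:

    // Keep only terms between 3 and 10 characters (both bounds are inclusive,
    // per the len >= min && len <= max test above).
    TokenStream stream = new WhitespaceTokenizer(new StringReader("a quick brown fox"));
    stream = new LengthFilter(stream, 3, 10);
    TermAttribute termAtt = (TermAttribute) stream.AddAttribute(typeof(TermAttribute));
    while (stream.IncrementToken())
        Console.WriteLine(termAtt.Term());   // quick, brown, fox -- "a" is dropped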
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/LetterTokenizer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/LetterTokenizer.cs
new file mode 100644 (file)
index 0000000..3ca8b88
--- /dev/null
@@ -0,0 +1,57 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using AttributeSource = Mono.Lucene.Net.Util.AttributeSource;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary>A LetterTokenizer is a tokenizer that divides text at non-letters.  That is
+       /// to say, it defines tokens as maximal strings of adjacent letters, as determined
+       /// by the System.Char.IsLetter() predicate (java.lang.Character.isLetter() in the original Java code).
+       /// Note: this does a decent job for most European languages, but does a terrible
+       /// job for some Asian languages, where words are not separated by spaces. 
+       /// </summary>
+       
+       public class LetterTokenizer:CharTokenizer
+       {
+               /// <summary>Construct a new LetterTokenizer. </summary>
+               public LetterTokenizer(System.IO.TextReader in_Renamed):base(in_Renamed)
+               {
+               }
+               
+               /// <summary>Construct a new LetterTokenizer using a given {@link AttributeSource}. </summary>
+               public LetterTokenizer(AttributeSource source, System.IO.TextReader in_Renamed):base(source, in_Renamed)
+               {
+               }
+               
+               /// <summary>Construct a new LetterTokenizer using a given {@link Mono.Lucene.Net.Util.AttributeSource.AttributeFactory}. </summary>
+               public LetterTokenizer(AttributeFactory factory, System.IO.TextReader in_Renamed):base(factory, in_Renamed)
+               {
+               }
+               
+               /// <summary>Collects only characters which satisfy
+               /// {@link Character#isLetter(char)}.
+               /// </summary>
+               protected internal override bool IsTokenChar(char c)
+               {
+                       return System.Char.IsLetter(c);
+               }
+       }
+}
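A small sketch of the tokenization rule documented above, under the same usings as the earlier consumer sketch; the input is illustrative:

    // Digits and punctuation are not letters, so they act as separators.
    TokenStream ts = new LetterTokenizer(new StringReader("abc123def-ghi"));
    TermAttribute termAtt = (TermAttribute) ts.AddAttribute(typeof(TermAttribute));
    while (ts.IncrementToken())
        Console.WriteLine(termAtt.Term());   // abc, def, ghi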
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/LowerCaseFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/LowerCaseFilter.cs
new file mode 100644 (file)
index 0000000..bb22ad7
--- /dev/null
@@ -0,0 +1,55 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> Normalizes token text to lower case.
+       /// 
+       /// </summary>
+       /// <version>  $Id: LowerCaseFilter.java 797665 2009-07-24 21:45:48Z buschmi $
+       /// </version>
+       public sealed class LowerCaseFilter:TokenFilter
+       {
+               public LowerCaseFilter(TokenStream in_Renamed):base(in_Renamed)
+               {
+                       termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
+               }
+               
+               private TermAttribute termAtt;
+               
+               public override bool IncrementToken()
+               {
+                       if (input.IncrementToken())
+                       {
+                               
+                               char[] buffer = termAtt.TermBuffer();
+                               int length = termAtt.TermLength();
+                               for (int i = 0; i < length; i++)
+                                       buffer[i] = System.Char.ToLower(buffer[i]);
+                               
+                               return true;
+                       }
+                       else
+                               return false;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/LowerCaseTokenizer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/LowerCaseTokenizer.cs
new file mode 100644 (file)
index 0000000..28df129
--- /dev/null
@@ -0,0 +1,59 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using AttributeSource = Mono.Lucene.Net.Util.AttributeSource;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> LowerCaseTokenizer performs the function of LetterTokenizer
+       /// and LowerCaseFilter together.  It divides text at non-letters and converts
+       /// the resulting tokens to lower case.  While it is functionally equivalent to the combination
+       /// of LetterTokenizer and LowerCaseFilter, there is a performance advantage
+       /// to doing the two tasks at once, hence this (redundant) implementation.
+       /// <p/>
+       /// Note: this does a decent job for most European languages, but does a terrible
+       /// job for some Asian languages, where words are not separated by spaces.
+       /// </summary>
+       public sealed class LowerCaseTokenizer:LetterTokenizer
+       {
+               /// <summary>Construct a new LowerCaseTokenizer. </summary>
+               public LowerCaseTokenizer(System.IO.TextReader in_Renamed):base(in_Renamed)
+               {
+               }
+               
+               /// <summary>Construct a new LowerCaseTokenizer using a given {@link AttributeSource}. </summary>
+               public LowerCaseTokenizer(AttributeSource source, System.IO.TextReader in_Renamed):base(source, in_Renamed)
+               {
+               }
+               
+               /// <summary>Construct a new LowerCaseTokenizer using a given {@link Mono.Lucene.Net.Util.AttributeSource.AttributeFactory}. </summary>
+               public LowerCaseTokenizer(AttributeFactory factory, System.IO.TextReader in_Renamed):base(factory, in_Renamed)
+               {
+               }
+               
+               /// <summary>Converts a char to lower case, in the manner of
+               /// {@link Character#toLowerCase(char)}.
+               /// </summary>
+               protected internal override char Normalize(char c)
+               {
+                       return System.Char.ToLower(c);
+               }
+       }
+}
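The equivalence claimed in the summary, as a sketch (illustrative input; both pipelines yield the tokens "foo" and "bar"):

    // Chained form: tokenize, then lower-case in a second pass over the term buffer.
    TokenStream chained = new LowerCaseFilter(new LetterTokenizer(new StringReader("Foo Bar")));
    // Fused form: Normalize() lower-cases each char during tokenization itself.
    TokenStream fused = new LowerCaseTokenizer(new StringReader("Foo Bar"));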
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/MappingCharFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/MappingCharFilter.cs
new file mode 100644 (file)
index 0000000..7a8cee6
--- /dev/null
@@ -0,0 +1,165 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> Simplistic {@link CharFilter} that applies the mappings
+       /// contained in a {@link NormalizeCharMap} to the character
+       /// stream, correcting the offsets for the resulting
+       /// changes.
+       /// </summary>
+       public class MappingCharFilter:BaseCharFilter
+       {
+               
+               private NormalizeCharMap normMap;
+               //private LinkedList<Character> buffer;
+               private System.Collections.ArrayList buffer;
+               private System.String replacement;
+               private int charPointer;
+               private int nextCharCounter;
+               
+               /// Constructor that takes a {@link CharStream}.
+               public MappingCharFilter(NormalizeCharMap normMap, CharStream in_Renamed):base(in_Renamed)
+               {
+                       this.normMap = normMap;
+               }
+               
+               /// Convenience constructor that takes a {@link Reader}.
+               public MappingCharFilter(NormalizeCharMap normMap, System.IO.TextReader in_Renamed):base(CharReader.Get(in_Renamed))
+               {
+                       this.normMap = normMap;
+               }
+               
+               public  override int Read()
+               {
+                       while (true)
+                       {
+                               if (replacement != null && charPointer < replacement.Length)
+                               {
+                                       return replacement[charPointer++];
+                               }
+                               
+                               int firstChar = NextChar();
+                               if (firstChar == - 1)
+                                       return - 1;
+                               NormalizeCharMap nm = normMap.submap != null?(NormalizeCharMap) normMap.submap[CharacterCache.ValueOf((char) firstChar)]:null;
+                               if (nm == null)
+                                       return firstChar;
+                               NormalizeCharMap result = Match(nm);
+                               if (result == null)
+                                       return firstChar;
+                               replacement = result.normStr;
+                               charPointer = 0;
+                               if (result.diff != 0)
+                               {
+                                       int prevCumulativeDiff = GetLastCumulativeDiff();
+                                       if (result.diff < 0)
+                                       {
+                                               for (int i = 0; i < - result.diff; i++)
+                                                       AddOffCorrectMap(nextCharCounter + i - prevCumulativeDiff, prevCumulativeDiff - 1 - i);
+                                       }
+                                       else
+                                       {
+                                               AddOffCorrectMap(nextCharCounter - result.diff - prevCumulativeDiff, prevCumulativeDiff + result.diff);
+                                       }
+                               }
+                       }
+               }
+               
+               private int NextChar()
+               {
+                       nextCharCounter++;
+                       if (buffer != null && !(buffer.Count == 0))
+                       {
+                               System.Object tempObject;
+                               tempObject = buffer[0];
+                               buffer.RemoveAt(0);
+                               return ((System.Char) tempObject);
+                       }
+                       return input.Read();
+               }
+               
+               private void  PushChar(int c)
+               {
+                       nextCharCounter--;
+                       if (buffer == null)
+                       {
+                               buffer = new System.Collections.ArrayList();
+                       }
+                       buffer.Insert(0, (char) c);
+               }
+               
+               private void  PushLastChar(int c)
+               {
+                       if (buffer == null)
+                       {
+                               buffer = new System.Collections.ArrayList();
+                       }
+                       buffer.Insert(buffer.Count, (char) c);
+               }
+               
+               private NormalizeCharMap Match(NormalizeCharMap map)
+               {
+                       NormalizeCharMap result = null;
+                       if (map.submap != null)
+                       {
+                               int chr = NextChar();
+                               if (chr != - 1)
+                               {
+                                       NormalizeCharMap subMap = (NormalizeCharMap) map.submap[CharacterCache.ValueOf((char) chr)];
+                                       if (subMap != null)
+                                       {
+                                               result = Match(subMap);
+                                       }
+                                       if (result == null)
+                                       {
+                                               PushChar(chr);
+                                       }
+                               }
+                       }
+                       if (result == null && map.normStr != null)
+                       {
+                               result = map;
+                       }
+                       return result;
+               }
+               
+               public  override int Read(System.Char[] cbuf, int off, int len)
+               {
+                       char[] tmp = new char[len];
+                       int l = input.Read(tmp, 0, len);
+                       if (l != 0)
+                       {
+                               for (int i = 0; i < l; i++)
+                                       PushLastChar(tmp[i]);
+                       }
+                       l = 0;
+                       for (int i = off; i < off + len; i++)
+                       {
+                               int c = Read();
+                               if (c == - 1)
+                                       break;
+                               cbuf[i] = (char) c;
+                               l++;
+                       }
+                       return l == 0?- 1:l;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/NormalizeCharMap.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/NormalizeCharMap.cs
new file mode 100644 (file)
index 0000000..b923a55
--- /dev/null
@@ -0,0 +1,70 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> Holds a map of String input to String output, to be used
+       /// with {@link MappingCharFilter}.
+       /// </summary>
+       public class NormalizeCharMap
+       {
+               
+               //Map<Character, NormalizeMap> submap;
+               internal System.Collections.IDictionary submap;
+               internal System.String normStr;
+               internal int diff;
+               
+               /// <summary>Records a replacement to be applied to the input
+               /// stream.  Whenever <code>singleMatch</code> occurs in
+               /// the input, it will be replaced with
+               /// <code>replacement</code>.
+               /// 
+               /// </summary>
+               /// <param name="singleMatch">input String to be replaced
+               /// </param>
+               /// <param name="replacement">output String
+               /// </param>
+               public virtual void  Add(System.String singleMatch, System.String replacement)
+               {
+                       NormalizeCharMap currMap = this;
+                       for (int i = 0; i < singleMatch.Length; i++)
+                       {
+                               char c = singleMatch[i];
+                               if (currMap.submap == null)
+                               {
+                                       currMap.submap = new System.Collections.Hashtable(1);
+                               }
+                               NormalizeCharMap map = (NormalizeCharMap) currMap.submap[CharacterCache.ValueOf(c)];
+                               if (map == null)
+                               {
+                                       map = new NormalizeCharMap();
+                                       currMap.submap[c] = map;
+                               }
+                               currMap = map;
+                       }
+                       if (currMap.normStr != null)
+                       {
+                               throw new System.SystemException("MappingCharFilter: there is already a mapping for " + singleMatch);
+                       }
+                       currMap.normStr = replacement;
+                       currMap.diff = singleMatch.Length - replacement.Length;
+               }
+       }
+}
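A sketch of the two classes above working together, under the same usings as the earlier sketches (the mapping and input are illustrative; note diff = 1 - 2 = -1 here, which exercises the negative-diff offset correction in MappingCharFilter.Read()):

    NormalizeCharMap map = new NormalizeCharMap();
    map.Add("\u00DF", "ss");   // one input char expands to two output chars
    MappingCharFilter filter = new MappingCharFilter(map, new StringReader("stra\u00DFe"));
    int c;
    while ((c = filter.Read()) != -1)
        Console.Write((char) c);   // strasse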
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/NumericTokenStream.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/NumericTokenStream.cs
new file mode 100644 (file)
index 0000000..252bf86
--- /dev/null
@@ -0,0 +1,276 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using AttributeSource = Mono.Lucene.Net.Util.AttributeSource;
+using NumericUtils = Mono.Lucene.Net.Util.NumericUtils;
+using NumericField = Mono.Lucene.Net.Documents.NumericField;
+// for javadocs
+using NumericRangeQuery = Mono.Lucene.Net.Search.NumericRangeQuery;
+using NumericRangeFilter = Mono.Lucene.Net.Search.NumericRangeFilter;
+using SortField = Mono.Lucene.Net.Search.SortField;
+using FieldCache = Mono.Lucene.Net.Search.FieldCache;
+// javadocs
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+using TypeAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TypeAttribute;
+using PositionIncrementAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.PositionIncrementAttribute;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> <b>Expert:</b> This class provides a {@link TokenStream}
+       /// for indexing numeric values that can be used by {@link
+       /// NumericRangeQuery} or {@link NumericRangeFilter}.
+       /// 
+       /// <p/>Note that for simple usage, {@link NumericField} is
+       /// recommended.  {@link NumericField} disables norms and
+       /// term freqs, as they are not usually needed during
+       /// searching.  If you need to change these settings, you
+       /// should use this class.
+       /// 
+       /// <p/>See {@link NumericField} for capabilities of fields
+       /// indexed numerically.<p/>
+       /// 
+       /// <p/>Here's an example usage, for an <code>int</code> field:
+       /// 
+       /// <pre>
+       ///  Field field = new Field(name, new NumericTokenStream(precisionStep).setIntValue(value));
+       ///  field.setOmitNorms(true);
+       ///  field.setOmitTermFreqAndPositions(true);
+       ///  document.add(field);
+       /// </pre>
+       /// 
+       /// <p/>For optimal performance, re-use the TokenStream and Field instance
+       /// for more than one document:
+       /// 
+       /// <pre>
+       ///  NumericTokenStream stream = new NumericTokenStream(precisionStep);
+       ///  Field field = new Field(name, stream);
+       ///  field.setOmitNorms(true);
+       ///  field.setOmitTermFreqAndPositions(true);
+       ///  Document document = new Document();
+       ///  document.add(field);
+       /// 
+       ///  for(all documents) {
+       ///    stream.setIntValue(value)
+       ///    writer.addDocument(document);
+       ///  }
+       /// </pre>
+       /// 
+       /// <p/>This stream is not intended to be used in analyzers;
+       /// it's more for iterating the different precisions during
+       /// indexing a specific numeric value.<p/>
+       /// 
+       /// <p/><b>NOTE</b>: as token streams are only consumed once
+       /// the document is added to the index, if you index more
+       /// than one numeric field, use a separate <code>NumericTokenStream</code>
+       /// instance for each.<p/>
+       /// 
+       /// <p/>See {@link NumericRangeQuery} for more details on the
+       /// <a
+       /// href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
+       /// parameter as well as how numeric fields work under the hood.<p/>
+       /// 
+       /// <p/><font color="red"><b>NOTE:</b> This API is experimental and
+       /// might change in incompatible ways in the next release.</font>
+       /// 
+       /// </summary>
+       /// <since> 2.9
+       /// </since>
+       public sealed class NumericTokenStream:TokenStream
+       {
+               private void  InitBlock()
+               {
+                       termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
+                       typeAtt = (TypeAttribute) AddAttribute(typeof(TypeAttribute));
+                       posIncrAtt = (PositionIncrementAttribute) AddAttribute(typeof(PositionIncrementAttribute));
+               }
+               
+               /// <summary>The full precision token gets this token type assigned. </summary>
+               public const System.String TOKEN_TYPE_FULL_PREC = "fullPrecNumeric";
+               
+               /// <summary>The lower precision tokens get this token type assigned. </summary>
+               public const System.String TOKEN_TYPE_LOWER_PREC = "lowerPrecNumeric";
+               
+               /// <summary> Creates a token stream for numeric values using the default <code>precisionStep</code>
+               /// {@link NumericUtils#PRECISION_STEP_DEFAULT} (4). The stream is not yet initialized;
+               /// before use, set a value with one of the set<em>???</em>Value() methods.
+               /// </summary>
+               public NumericTokenStream():this(NumericUtils.PRECISION_STEP_DEFAULT)
+               {
+               }
+               
+               /// <summary> Creates a token stream for numeric values with the specified
+               /// <code>precisionStep</code>. The stream is not yet initialized;
+               /// before use, set a value with one of the set<em>???</em>Value() methods.
+               /// </summary>
+               public NumericTokenStream(int precisionStep):base()
+               {
+                       InitBlock();
+                       this.precisionStep = precisionStep;
+                       if (precisionStep < 1)
+                               throw new System.ArgumentException("precisionStep must be >=1");
+               }
+               
+               /// <summary> Expert: Creates a token stream for numeric values with the specified
+               /// <code>precisionStep</code> using the given {@link AttributeSource}.
+               /// The stream is not yet initialized;
+               /// before use, set a value with one of the set<em>???</em>Value() methods.
+               /// </summary>
+               public NumericTokenStream(AttributeSource source, int precisionStep):base(source)
+               {
+                       InitBlock();
+                       this.precisionStep = precisionStep;
+                       if (precisionStep < 1)
+                               throw new System.ArgumentException("precisionStep must be >=1");
+               }
+               
+               /// <summary> Expert: Creates a token stream for numeric values with the specified
+               /// <code>precisionStep</code> using the given
+               /// {@link org.apache.lucene.util.AttributeSource.AttributeFactory}.
+               /// The stream is not yet initialized;
+               /// before use, set a value with one of the set<em>???</em>Value() methods.
+               /// </summary>
+               public NumericTokenStream(AttributeFactory factory, int precisionStep):base(factory)
+               {
+                       InitBlock();
+                       this.precisionStep = precisionStep;
+                       if (precisionStep < 1)
+                               throw new System.ArgumentException("precisionStep must be >=1");
+               }
+               
+               /// <summary> Initializes the token stream with the supplied <code>long</code> value.</summary>
+               /// <param name="value">the value, for which this TokenStream should enumerate tokens.
+               /// </param>
+               /// <returns> this instance, because of this you can use it the following way:
+               /// <code>new Field(name, new NumericTokenStream(precisionStep).SetLongValue(value))</code>
+               /// </returns>
+               public NumericTokenStream SetLongValue(long value_Renamed)
+               {
+                       this.value_Renamed = value_Renamed;
+                       valSize = 64;
+                       shift = 0;
+                       return this;
+               }
+               
+               /// <summary> Initializes the token stream with the supplied <code>int</code> value.</summary>
+               /// <param name="value">the value, for which this TokenStream should enumerate tokens.
+               /// </param>
+               /// <returns> this instance, because of this you can use it the following way:
+               /// <code>new Field(name, new NumericTokenStream(precisionStep).SetIntValue(value))</code>
+               /// </returns>
+               public NumericTokenStream SetIntValue(int value_Renamed)
+               {
+                       this.value_Renamed = (long) value_Renamed;
+                       valSize = 32;
+                       shift = 0;
+                       return this;
+               }
+               
+               /// <summary> Initializes the token stream with the supplied <code>double</code> value.</summary>
+               /// <param name="value">the value, for which this TokenStream should enumerate tokens.
+               /// </param>
+               /// <returns> this instance, because of this you can use it the following way:
+               /// <code>new Field(name, new NumericTokenStream(precisionStep).SetDoubleValue(value))</code>
+               /// </returns>
+               public NumericTokenStream SetDoubleValue(double value_Renamed)
+               {
+                       this.value_Renamed = NumericUtils.DoubleToSortableLong(value_Renamed);
+                       valSize = 64;
+                       shift = 0;
+                       return this;
+               }
+               
+               /// <summary> Initializes the token stream with the supplied <code>float</code> value.</summary>
+               /// <param name="value">the value, for which this TokenStream should enumerate tokens.
+               /// </param>
+               /// <returns> this instance, because of this you can use it the following way:
+               /// <code>new Field(name, new NumericTokenStream(precisionStep).SetFloatValue(value))</code>
+               /// </returns>
+               public NumericTokenStream SetFloatValue(float value_Renamed)
+               {
+                       this.value_Renamed = (long) NumericUtils.FloatToSortableInt(value_Renamed);
+                       valSize = 32;
+                       shift = 0;
+                       return this;
+               }
+               
+               // @Override
+               public override void  Reset()
+               {
+                       if (valSize == 0)
+                               throw new System.SystemException("call set???Value() before usage");
+                       shift = 0;
+               }
+               
+               // @Override
+               public override bool IncrementToken()
+               {
+                       if (valSize == 0)
+                               throw new System.SystemException("call set???Value() before usage");
+                       if (shift >= valSize)
+                               return false;
+                       
+                       ClearAttributes();
+                       char[] buffer;
+                       switch (valSize)
+                       {
+                               
+                               case 64: 
+                                       buffer = termAtt.ResizeTermBuffer(NumericUtils.BUF_SIZE_LONG);
+                                       termAtt.SetTermLength(NumericUtils.LongToPrefixCoded(value_Renamed, shift, buffer));
+                                       break;
+                               
+                               
+                               case 32: 
+                                       buffer = termAtt.ResizeTermBuffer(NumericUtils.BUF_SIZE_INT);
+                                       termAtt.SetTermLength(NumericUtils.IntToPrefixCoded((int) value_Renamed, shift, buffer));
+                                       break;
+                               
+                               
+                               default: 
+                                       // should not happen
+                                       throw new System.ArgumentException("valSize must be 32 or 64");
+                               
+                       }
+                       
+                       typeAtt.SetType((shift == 0)?TOKEN_TYPE_FULL_PREC:TOKEN_TYPE_LOWER_PREC);
+                       posIncrAtt.SetPositionIncrement((shift == 0)?1:0);
+                       shift += precisionStep;
+                       return true;
+               }
+               
+               // @Override
+               public override System.String ToString()
+               {
+                       System.Text.StringBuilder sb = new System.Text.StringBuilder("(numeric,valSize=").Append(valSize);
+                       sb.Append(",precisionStep=").Append(precisionStep).Append(')');
+                       return sb.ToString();
+               }
+               
+               // members
+               private TermAttribute termAtt;
+               private TypeAttribute typeAtt;
+               private PositionIncrementAttribute posIncrAtt;
+               
+               private int shift = 0, valSize = 0; // valSize==0 means not initialized
+               private int precisionStep;
+               
+               private long value_Renamed = 0L;
+       }
+}
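A C# rendering of the reuse pattern from the class comment above (assumed context: usings for Mono.Lucene.Net.Documents and Mono.Lucene.Net.Index, an open IndexWriter named writer, and an int sequence named values; the field name is illustrative):

    NumericTokenStream stream = new NumericTokenStream(4); // precisionStep = 4, the default
    Field field = new Field("price", stream);              // "price" is an illustrative name
    field.SetOmitNorms(true);
    field.SetOmitTermFreqAndPositions(true);
    Document document = new Document();
    document.Add(field);

    foreach (int v in values)                // `values` is an assumed input sequence
    {
        stream.SetIntValue(v);               // initialize the stream before it is consumed
        writer.AddDocument(document);        // consuming the stream indexes all precisions of v
    }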
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Package.html b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Package.html
new file mode 100644 (file)
index 0000000..840c51c
--- /dev/null
@@ -0,0 +1,636 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">\r
+<!--\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements.  See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License.  You may obtain a copy of the License at\r
+\r
+     http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+-->\r
+<html>\r
+<head>\r
+   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">\r
+</head>\r
+<body>\r
+<p>API and code to convert text into indexable/searchable tokens.  Covers {@link Lucene.Net.Analysis.Analyzer} and related classes.</p>\r
+<h2>Parsing? Tokenization? Analysis!</h2>\r
+<p>\r
+Lucene, an indexing and search library, accepts only plain text input.\r
+<p>\r
+<h2>Parsing</h2>\r
+<p>\r
+Applications that build their search capabilities upon Lucene may support documents in various formats &ndash; HTML, XML, PDF, Word &ndash; just to name a few.\r
+Lucene does not care about the <i>Parsing</i> of these and other document formats, and it is the responsibility of the \r
+application using Lucene to use an appropriate <i>Parser</i> to convert the original format into plain text before passing that plain text to Lucene.\r
+<p>\r
+<h2>Tokenization</h2>\r
+<p>\r
+Plain text passed to Lucene for indexing goes through a process generally called tokenization. Tokenization is the process\r
+of breaking input text into small indexing elements &ndash; tokens.\r
+The way input text is broken into tokens heavily influences how people will then be able to search for that text. \r
+For instance, sentence beginnings and endings can be identified to provide for more accurate phrase \r
+and proximity searches (though sentence identification is not provided by Lucene).\r
+<p>\r
+In some cases simply breaking the input text into tokens is not enough &ndash; a deeper <i>Analysis</i> may be needed.\r
+There are many post-tokenization steps that can be done, including (but not limited to):\r
+<ul>\r
+  <li><a href = "http://en.wikipedia.org//wiki/Stemming">Stemming</a> &ndash; \r
+      Replacing of words by their stems. \r
+      For instance with English stemming "bikes" is replaced by "bike"; \r
+      now query "bike" can find both documents containing "bike" and those containing "bikes".\r
+  </li>\r
+  <li><a href = "http://en.wikipedia.org//wiki/Stop_words">Stop Words Filtering</a> &ndash; \r
+      Common words like "the", "and" and "a" rarely add any value to a search.\r
+      Removing them shrinks the index size and increases performance.\r
+      It may also reduce some "noise" and actually improve search quality.\r
+  </li>\r
+  <li><a href = "http://en.wikipedia.org//wiki/Text_normalization">Text Normalization</a> &ndash; \r
+      Stripping accents and other character markings can make for better searching.\r
+  </li>\r
+  <li><a href = "http://en.wikipedia.org//wiki/Synonym">Synonym Expansion</a> &ndash; \r
+      Adding in synonyms at the same token position as the current word can mean better \r
+      matching when users search with words in the synonym set.\r
+  </li>\r
+</ul> \r
+<p>\r
+<h2>Core Analysis</h2>\r
+<p>\r
+  The analysis package provides the mechanism to convert Strings and Readers into tokens that can be indexed by Lucene.  There\r
+  are three main classes in the package from which all analysis processes are derived.  These are:\r
+  <ul>\r
+    <li>{@link Lucene.Net.Analysis.Analyzer} &ndash; An Analyzer is responsible for building a {@link Lucene.Net.Analysis.TokenStream} which can be consumed\r
+    by the indexing and searching processes.  See below for more information on implementing your own Analyzer.</li>\r
+    <li>{@link Lucene.Net.Analysis.Tokenizer} &ndash; A Tokenizer is a {@link Lucene.Net.Analysis.TokenStream} and is responsible for breaking\r
+    up incoming text into tokens. In most cases, an Analyzer will use a Tokenizer as the first step in\r
+    the analysis process.</li>\r
+    <li>{@link Lucene.Net.Analysis.TokenFilter} &ndash; A TokenFilter is also a {@link Lucene.Net.Analysis.TokenStream} and is responsible\r
+    for modifying tokens that have been created by the Tokenizer.  Common modifications performed by a\r
+    TokenFilter are: deletion, stemming, synonym injection, and down casing.  Not all Analyzers require TokenFilters.</li>\r
+  </ul>\r
+  <b>Lucene 2.9 introduces a new TokenStream API. Please see the section "New TokenStream API" below for more details.</b>\r
+</p>\r
+<h2>Hints, Tips and Traps</h2>\r
+<p>\r
+   The synergy between {@link Lucene.Net.Analysis.Analyzer} and {@link Lucene.Net.Analysis.Tokenizer}\r
+   is sometimes confusing. To ease this confusion, some clarifications:\r
+   <ul>\r
+      <li>The {@link Lucene.Net.Analysis.Analyzer} is responsible for the entire task of \r
+          <u>creating</u> tokens out of the input text, while the {@link Lucene.Net.Analysis.Tokenizer}\r
+          is only responsible for <u>breaking</u> the input text into tokens. Very likely, tokens created \r
+          by the {@link Lucene.Net.Analysis.Tokenizer} would be modified or even omitted \r
+          by the {@link Lucene.Net.Analysis.Analyzer} (via one or more\r
+          {@link Lucene.Net.Analysis.TokenFilter}s) before being returned.\r
+       </li>\r
+       <li>{@link Lucene.Net.Analysis.Tokenizer} is a {@link Lucene.Net.Analysis.TokenStream}, \r
+           but {@link Lucene.Net.Analysis.Analyzer} is not.\r
+       </li>\r
+       <li>{@link Lucene.Net.Analysis.Analyzer} is "field aware", but \r
+           {@link Lucene.Net.Analysis.Tokenizer} is not.\r
+       </li>\r
+   </ul>\r
+</p>\r
+<p>\r
+  Lucene Java provides a number of analysis capabilities, the most commonly used one being the {@link\r
+  Lucene.Net.Analysis.Standard.StandardAnalyzer}.  Many applications will have a long and industrious life with nothing more\r
+  than the StandardAnalyzer.  However, there are a few other classes/packages that are worth mentioning:\r
+  <ol>\r
+    <li>{@link Lucene.Net.Analysis.PerFieldAnalyzerWrapper} &ndash; Most Analyzers perform the same operation on all\r
+      {@link Lucene.Net.Documents.Field}s.  The PerFieldAnalyzerWrapper can be used to associate a different Analyzer with different\r
+      {@link Lucene.Net.Documents.Field}s.</li>\r
+    <li>The contrib/analyzers library located at the root of the Lucene distribution has a number of different Analyzer implementations to solve a variety\r
+    of different problems related to searching.  Many of the Analyzers are designed to analyze non-English languages.</li>\r
+    <li>The contrib/snowball library \r
+        located at the root of the Lucene distribution has Analyzer and TokenFilter \r
+        implementations for a variety of Snowball stemmers.  \r
+        See <a href = "http://snowball.tartarus.org">http://snowball.tartarus.org</a> \r
+        for more information on Snowball stemmers.</li>\r
+    <li>There are a variety of Tokenizer and TokenFilter implementations in this package.  Take a look around, chances are someone has implemented what you need.</li>\r
+  </ol>\r
+</p>\r
+<p>\r
+  Analysis is one of the main causes of performance degradation during indexing.  Simply put, the more you analyze, the slower the indexing (in most cases).\r
+  Perhaps your application would be just fine using the simple {@link Lucene.Net.Analysis.WhitespaceTokenizer} combined with a\r
+  {@link Lucene.Net.Analysis.StopFilter}. The contrib/benchmark library can be useful for testing out the speed of the analysis process.\r
+</p>\r
+<h2>Invoking the Analyzer</h2>\r
+<p>\r
+  Applications usually do not invoke analysis &ndash; Lucene does it for them:\r
+  <ul>\r
+    <li>At indexing, as a consequence of \r
+        {@link Lucene.Net.Index.IndexWriter#addDocument(Lucene.Net.Documents.Document) addDocument(doc)},\r
+        the Analyzer in effect for indexing is invoked for each indexed field of the added document.\r
+    </li>\r
+    <li>At search, as a consequence of\r
+        {@link Lucene.Net.QueryParsers.QueryParser#parse(java.lang.String) QueryParser.parse(queryText)},\r
+        the QueryParser may invoke the Analyzer in effect.\r
+        Note that for some queries analysis does not take place, e.g. wildcard queries.\r
+    </li>\r
+  </ul>\r
+  However, an application might invoke analysis on any text, for testing or for any other purpose, something like:\r
+  <PRE>\r
+      Analyzer analyzer = new StandardAnalyzer(); // or any other analyzer\r
+      TokenStream ts = analyzer.tokenStream("myfield",new StringReader("some text goes here"));\r
+      while (ts.incrementToken()) {\r
+        System.out.println("token: "+ts));\r
+        t = ts.next();\r
+      }\r
+  </PRE>\r
+</p>\r
+<h2>Indexing Analysis vs. Search Analysis</h2>\r
+<p>\r
+  Selecting the "correct" analyzer is crucial\r
+  for search quality, and can also affect indexing and search performance.\r
+  The "correct" analyzer differs between applications.\r
+  Lucene Java's wiki page \r
+  <a href="http://wiki.apache.org/lucene-java/AnalysisParalysis">AnalysisParalysis</a> \r
+  provides some data on "analyzing your analyzer".\r
+  Here are some rules of thumb:\r
+  <ol>\r
+    <li>Test test test... (did we say test?)</li>\r
+    <li>Beware of over-analysis &ndash; it might hurt indexing performance.</li>\r
+    <li>Start with the same analyzer for indexing and search; otherwise searches would not find what they are supposed to...</li>\r
+    <li>In some cases a different analyzer is required for indexing and search, for instance:\r
+        <ul>\r
+           <li>Certain searches require more stop words to be filtered. (I.e. more than those that were filtered at indexing.)</li>\r
+           <li>Query expansion by synonyms, acronyms, auto spell correction, etc.</li>\r
+        </ul>\r
+        This might sometimes require a modified analyzer &ndash; see the next section on how to do that.\r
+    </li>\r
+  </ol>\r
+</p>\r
+<h2>Implementing your own Analyzer</h2>\r
+<p>Creating your own Analyzer is straightforward. It usually involves either wrapping an existing Tokenizer and a set of TokenFilters to create a new Analyzer\r
+or creating both the Analyzer and a Tokenizer or TokenFilter.  Before pursuing this approach, you may find it worthwhile\r
+to explore the contrib/analyzers library and/or ask on the java-user@lucene.apache.org mailing list first to see if what you need already exists.\r
+If you are still committed to creating your own Analyzer or TokenStream derivation (Tokenizer or TokenFilter), have a look at\r
+the source code of any one of the many samples located in this package.\r
+</p>\r
+<p>\r
+  The following sections discuss some aspects of implementing your own analyzer.\r
+</p>\r
+<h3>Field Section Boundaries</h3>\r
+<p>\r
+  When {@link Lucene.Net.Documents.Document#add(Lucene.Net.Documents.Fieldable) document.add(field)}\r
+  is called multiple times for the same field name, we could say that each such call creates a new \r
+  section for that field in that document. \r
+  In fact, a separate call to \r
+  {@link Lucene.Net.Analysis.Analyzer#tokenStream(java.lang.String, java.io.Reader) tokenStream(field,reader)}\r
+  would take place for each of these so-called "sections".\r
+  However, the default Analyzer behavior is to treat all these sections as one large section. \r
+  This allows phrase search and proximity search to seamlessly cross \r
+  boundaries between these "sections".\r
+  In other words, if a certain field "f" is added like this:\r
+  <PRE>\r
+      document.add(new Field("f","first ends",...);\r
+      document.add(new Field("f","starts two",...);\r
+      indexWriter.addDocument(document);\r
+  </PRE>\r
+  Then, a phrase search for "ends starts" would find that document.\r
+  Where desired, this behavior can be modified by introducing a "position gap" between consecutive field "sections", \r
+  simply by overriding \r
+  {@link Lucene.Net.Analysis.Analyzer#getPositionIncrementGap(java.lang.String) Analyzer.getPositionIncrementGap(fieldName)}:\r
+  <PRE>\r
+      Analyzer myAnalyzer = new StandardAnalyzer() {\r
+         public int getPositionIncrementGap(String fieldName) {\r
+           return 10;\r
+         }\r
+      };\r
+  </PRE>\r
+</p>\r
+<h3>Token Position Increments</h3>\r
+<p>\r
+   By default, all tokens created by Analyzers and Tokenizers have a \r
+   {@link Lucene.Net.Analysis.Tokenattributes.PositionIncrementAttribute#getPositionIncrement() position increment} of one.\r
+   This means that the position stored for that token in the index would be one more than\r
+   that of the previous token.\r
+   Recall that phrase and proximity searches rely on position info.\r
+</p>\r
+<p>\r
+   If the selected analyzer filters the stop words "is" and "the", then for a document \r
+   containing the string "blue is the sky", only the tokens "blue", "sky" are indexed, \r
+   with position("sky") = 1 + position("blue"). Now, a phrase query "blue is the sky"\r
+   would find that document, because the same analyzer filters the same stop words from\r
+   that query. The phrase query "blue sky" would also find that document.\r
+</p>\r
+<p>   \r
+   If this behavior does not fit the application needs,\r
+   a modified analyzer can be used that further increments the positions of\r
+   tokens following a removed stop word, using\r
+   {@link Lucene.Net.Analysis.Tokenattributes.PositionIncrementAttribute#setPositionIncrement(int)}.\r
+   This can be done with something like:\r
+   <PRE>\r
+      public TokenStream tokenStream(final String fieldName, Reader reader) {\r
+        final TokenStream ts = someAnalyzer.tokenStream(fieldName, reader);\r
+        TokenStream res = new TokenStream() {\r
+          TermAttribute termAtt = (TermAttribute) addAttribute(TermAttribute.class);\r
+          PositionIncrementAttribute posIncrAtt = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);\r
+        \r
+          public boolean incrementToken() throws IOException {\r
+            int extraIncrement = 0;\r
+            while (true) {\r
+              boolean hasNext = ts.incrementToken();\r
+              if (hasNext) {\r
+                if (stopWords.contains(termAtt.term())) {\r
+                  extraIncrement++; // filter this word\r
+                  continue;\r
+                } \r
+                if (extraIncrement>0) {\r
+                  posIncrAtt.setPositionIncrement(posIncrAtt.getPositionIncrement()+extraIncrement);\r
+                }\r
+              }\r
+              return hasNext;\r
+            }\r
+          }\r
+        };\r
+        return res;\r
+      }\r
+   </PRE>\r
+   Now, with this modified analyzer, the phrase query "blue sky" would find that document.\r
+   But note that this is not yet a perfect solution, because any phrase query "blue w1 w2 sky"\r
+   where both w1 and w2 are stop words would match that document.\r
+</p>\r
+<p>\r
+   A few more use cases for modifying position increments are:\r
+   <ol>\r
+     <li>Inhibiting phrase and proximity matches in sentence boundaries &ndash; for this, a tokenizer that \r
+         identifies a new sentence can add 1 to the position increment of the first token of the new sentence.</li>\r
+     <li>Injecting synonyms &ndash; here, synonyms of a token should be added after that token, \r
+         and their position increment should be set to 0.\r
+         As a result, all synonyms of a token would be considered to appear in exactly the \r
+         same position as that token, and that is how phrase and proximity searches will see them.</li>\r
+   </ol>\r
+</p>\r
+<h2>New TokenStream API</h2>\r
+<p>\r
+       With Lucene 2.9 we introduce a new TokenStream API. The old API used to produce Tokens. A Token\r
+       has getter and setter methods for different properties like positionIncrement and termText.\r
+       While this approach was sufficient for the default indexing format, it is not versatile enough for\r
+       Flexible Indexing, a term which summarizes the effort of making the Lucene indexer pluggable and extensible for custom\r
+       index formats.\r
+</p>\r
+<p>\r
+A fully customizable indexer means that users will be able to store custom data structures on disk. Therefore an API\r
+is necessary that can transport custom types of data from the documents to the indexer.\r
+</p>\r
+<h3>Attribute and AttributeSource</h3> \r
+Lucene 2.9 therefore introduces a new pair of classes called {@link Lucene.Net.Util.Attribute} and\r
+{@link Lucene.Net.Util.AttributeSource}. An Attribute serves as a\r
+particular piece of information about a text token. For example, {@link Lucene.Net.Analysis.Tokenattributes.TermAttribute}\r
+ contains the term text of a token, and {@link Lucene.Net.Analysis.Tokenattributes.OffsetAttribute} contains the start and end character offsets of a token.\r
+An AttributeSource is a collection of Attributes with a restriction: there may be only one instance of each attribute type. TokenStream now extends AttributeSource, which\r
+means that one can add Attributes to a TokenStream. Since TokenFilter extends TokenStream, all filters are also\r
+AttributeSources.\r
+<p>\r
+       Lucene now provides six Attributes out of the box, which replace the variables the Token class has:\r
+       <ul>\r
+         <li>{@link Lucene.Net.Analysis.Tokenattributes.TermAttribute}<p>The term text of a token.</p></li>\r
+         <li>{@link Lucene.Net.Analysis.Tokenattributes.OffsetAttribute}<p>The start and end offset of a token in characters.</p></li>\r
+         <li>{@link Lucene.Net.Analysis.Tokenattributes.PositionIncrementAttribute}<p>See above for detailed information about position increment.</p></li>\r
+         <li>{@link Lucene.Net.Analysis.Tokenattributes.PayloadAttribute}<p>The payload that a Token can optionally have.</p></li>\r
+         <li>{@link Lucene.Net.Analysis.Tokenattributes.TypeAttribute}<p>The type of the token. Default is 'word'.</p></li>\r
+         <li>{@link Lucene.Net.Analysis.Tokenattributes.FlagsAttribute}<p>Optional flags a token can have.</p></li>\r
+       </ul>\r
+</p>\r
+<h3>Using the new TokenStream API</h3>\r
+There are a few important things to know in order to use the new API efficiently, which are summarized here. You may want\r
+to walk through the example below first and come back to this section afterwards.\r
+<ol><li>\r
+Please keep in mind that an AttributeSource can only have one instance of a particular Attribute. Furthermore, if \r
+a chain of a TokenStream and multiple TokenFilters is used, then all TokenFilters in that chain share the Attributes\r
+with the TokenStream.\r
+</li>\r
+<br>\r
+<li>\r
+Attribute instances are reused for all tokens of a document. Thus, a TokenStream/-Filter needs to update\r
+the appropriate Attribute(s) in incrementToken(). The consumer, commonly the Lucene indexer, consumes the data in the\r
+Attributes and then calls incrementToken() again until it returns false, which indicates that the end of the stream\r
+was reached. This means that in each call of incrementToken() a TokenStream/-Filter can safely overwrite the data in\r
+the Attribute instances.\r
+</li>\r
+<br>\r
+<li>\r
+For performance reasons a TokenStream/-Filter should add/get Attributes during instantiation; i.e., create an attribute in the\r
+constructor and store references to it in an instance variable.  Using an instance variable instead of calling addAttribute()/getAttribute() \r
+in incrementToken() will avoid expensive casting and attribute lookups for every token in the document.\r
+</li>\r
+<br>\r
+<li>\r
+All methods in AttributeSource are idempotent, which means calling them multiple times always yields the same\r
+result. This is especially important to know for addAttribute(). The method takes the <b>type</b> (<code>Class</code>)\r
+of an Attribute as an argument and returns an <b>instance</b>. If an Attribute of the same type was previously added, then\r
+the already existing instance is returned, otherwise a new instance is created and returned. Therefore TokenStreams/-Filters\r
+can safely call addAttribute() with the same Attribute type multiple times. Even consumers of TokenStreams should\r
+normally call addAttribute() instead of getAttribute(), because it would not fail if the TokenStream does not have this\r
+Attribute (getAttribute() would throw an IllegalArgumentException, if the Attribute is missing). More advanced code\r
+could simply check with hasAttribute() whether a TokenStream has it, and conditionally leave out processing for\r
+extra performance (see the short C# sketch after this list).\r
+</li></ol>\r
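+<p>\r
+For illustration, the same idempotency in the C# port added by this commit (method names are capitalized there; the tokenizer and input are illustrative):\r
+<pre>\r
+TokenStream ts = new WhitespaceTokenizer(new StringReader("some text"));\r
+// Both calls return the SAME instance: AddAttribute is idempotent.\r
+TermAttribute a = (TermAttribute) ts.AddAttribute(typeof(TermAttribute));\r
+TermAttribute b = (TermAttribute) ts.AddAttribute(typeof(TermAttribute));\r
+// More advanced consumers can probe first and skip processing:\r
+if (ts.HasAttribute(typeof(TermAttribute))) {\r
+  // fast path\r
+}\r
+</pre>\r
+</p>\r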
+<h3>Example</h3>\r
+In this example we will create a WhitespaceTokenizer and use a LengthFilter to suppress all words that\r
+have two or fewer characters. The LengthFilter is part of the Lucene core and its implementation will be explained\r
+here to illustrate the usage of the new TokenStream API.<br>\r
+Then we will develop a custom Attribute, a PartOfSpeechAttribute, and add another filter to the chain which\r
+utilizes the new custom attribute, and call it PartOfSpeechTaggingFilter.\r
+<h4>Whitespace tokenization</h4>\r
+<pre>\r
+public class MyAnalyzer extends Analyzer {\r
+\r
+  public TokenStream tokenStream(String fieldName, Reader reader) {\r
+    TokenStream stream = new WhitespaceTokenizer(reader);\r
+    return stream;\r
+  }\r
+  \r
+  public static void main(String[] args) throws IOException {\r
+    // text to tokenize\r
+    final String text = "This is a demo of the new TokenStream API";\r
+    \r
+    MyAnalyzer analyzer = new MyAnalyzer();\r
+    TokenStream stream = analyzer.tokenStream("field", new StringReader(text));\r
+    \r
+    // get the TermAttribute from the TokenStream\r
+    TermAttribute termAtt = (TermAttribute) stream.addAttribute(TermAttribute.class);\r
+\r
+    stream.reset();\r
+    \r
+    // print all tokens until stream is exhausted\r
+    while (stream.incrementToken()) {\r
+      System.out.println(termAtt.term());\r
+    }\r
+    \r
+    stream.end();
+    stream.close();\r
+  }\r
+}\r
+</pre>\r
+In this simple example a plain whitespace tokenization is performed. In main() a loop consumes the stream and
+prints the term text of the tokens by accessing the TermAttribute that the WhitespaceTokenizer provides. 
+Here is the output:
+<pre>\r
+This\r
+is\r
+a\r
+demo\r
+of\r
+the\r
+new\r
+TokenStream\r
+API\r
+</pre>\r
+<h4>Adding a LengthFilter</h4>\r
+We want to suppress all tokens that have two or fewer characters. We can do that easily by adding a LengthFilter 
+to the chain. Only the tokenStream() method in our analyzer needs to be changed:
+<pre>\r
+  public TokenStream tokenStream(String fieldName, Reader reader) {\r
+    TokenStream stream = new WhitespaceTokenizer(reader);\r
+    stream = new LengthFilter(stream, 3, Integer.MAX_VALUE);\r
+    return stream;\r
+  }\r
+</pre>\r
+Note how the output now contains only words with three or more characters:
+<pre>\r
+This\r
+demo\r
+the\r
+new\r
+TokenStream\r
+API\r
+</pre>\r
+Now let's take a look at how the LengthFilter is implemented (it is part of Lucene's core):
+<pre>\r
+public final class LengthFilter extends TokenFilter {\r
+\r
+  final int min;\r
+  final int max;\r
+  \r
+  private TermAttribute termAtt;\r
+\r
+  /**\r
+   * Build a filter that removes words that are too long or too\r
+   * short from the text.\r
+   */\r
+  public LengthFilter(TokenStream in, int min, int max)\r
+  {\r
+    super(in);\r
+    this.min = min;\r
+    this.max = max;\r
+    termAtt = (TermAttribute) addAttribute(TermAttribute.class);\r
+  }\r
+  \r
+  /**\r
+   * Returns the next input Token whose term() has the right length.
+   */\r
+  public final boolean incrementToken() throws IOException\r
+  {\r
+    assert termAtt != null;\r
+    // return the first token within the configured length range
+    while (input.incrementToken()) {\r
+      int len = termAtt.termLength();\r
+      if (len >= min && len <= max) {\r
+          return true;\r
+      }\r
+      // note: else we ignore it but should we index each part of it?\r
+    }\r
+    // reached EOS -- return false
+    return false;\r
+  }\r
+}\r
+</pre>\r
+The TermAttribute is added in the constructor and stored in the instance variable <code>termAtt</code>.\r
+Remember that there can only be a single instance of TermAttribute in the chain, so in our example the \r
+<code>addAttribute()</code> call in LengthFilter returns the TermAttribute that the WhitespaceTokenizer already added. The tokens\r
+are retrieved from the input stream in the <code>incrementToken()</code> method. By looking at the term text\r
+in the TermAttribute the length of the term can be determined and too short or too long tokens are skipped. \r
+Note how <code>incrementToken()</code> can efficiently access the instance variable; no attribute lookup or downcasting
+is necessary. The same is true for the consumer, which can simply use local references to the Attributes.
+\r
+<h4>Adding a custom Attribute</h4>\r
+Now we're going to implement our own custom Attribute for part-of-speech tagging and call it consequently \r
+<code>PartOfSpeechAttribute</code>. First we need to define the interface of the new Attribute:\r
+<pre>\r
+  public interface PartOfSpeechAttribute extends Attribute {\r
+    public static enum PartOfSpeech {\r
+      Noun, Verb, Adjective, Adverb, Pronoun, Preposition, Conjunction, Article, Unknown\r
+    }\r
+  \r
+    public void setPartOfSpeech(PartOfSpeech pos);\r
+  \r
+    public PartOfSpeech getPartOfSpeech();\r
+  }\r
+</pre>\r
+\r
+Now we also need to write the implementing class. The name of that class is important here: by default, Lucene
+looks for a class whose name is the name of the Attribute interface with the suffix 'Impl'. In this example, we would
+consequently call the implementing class <code>PartOfSpeechAttributeImpl</code>. <br/>
+This is the usual behavior. However, there is also an expert API that allows changing this naming convention:
+{@link Lucene.Net.Util.AttributeSource.AttributeFactory}. The factory accepts an Attribute interface as argument
+and returns an actual instance. You can implement your own factory if you need to change the default behavior. <br/><br/>
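+<p>As a rough C# illustration only — assuming the port mirrors the Java original's
+<code>CreateAttributeInstance(System.Type)</code> override point and its <code>DEFAULT_ATTRIBUTE_FACTORY</code>
+field, neither of which is shown in this commit — a custom factory could look like this:</p>
+<pre>
+public class MyAttributeFactory : AttributeSource.AttributeFactory
+{
+    // Fall back to the default 'Impl' naming convention for everything
+    // except the interfaces we map by hand.
+    public override AttributeImpl CreateAttributeInstance(System.Type attClass)
+    {
+        if (attClass == typeof(PartOfSpeechAttribute))
+            return new PartOfSpeechAttributeImpl();
+        return AttributeSource.AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY
+                   .CreateAttributeInstance(attClass);
+    }
+}
+</pre>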
+\r
+Now here is the actual class that implements our new Attribute. Notice that the class has to extend\r
+{@link Lucene.Net.Util.AttributeImpl}:\r
+\r
+<pre>\r
+public final class PartOfSpeechAttributeImpl extends AttributeImpl \r
+                            implements PartOfSpeechAttribute {
+  \r
+  private PartOfSpeech pos = PartOfSpeech.Unknown;\r
+  \r
+  public void setPartOfSpeech(PartOfSpeech pos) {\r
+    this.pos = pos;\r
+  }\r
+  \r
+  public PartOfSpeech getPartOfSpeech() {\r
+    return pos;\r
+  }\r
+\r
+  public void clear() {\r
+    pos = PartOfSpeech.Unknown;\r
+  }\r
+\r
+  public void copyTo(AttributeImpl target) {\r
+    ((PartOfSpeechAttributeImpl) target).pos = pos;\r
+  }\r
+\r
+  public boolean equals(Object other) {\r
+    if (other == this) {\r
+      return true;\r
+    }\r
+    \r
+    if (other instanceof PartOfSpeechAttributeImpl) {\r
+      return pos == ((PartOfSpeechAttributeImpl) other).pos;\r
+    }\r
+
+    return false;\r
+  }\r
+\r
+  public int hashCode() {\r
+    return pos.ordinal();\r
+  }\r
+}\r
+</pre>\r
+This simple Attribute implementation has only a single variable that stores the part-of-speech of a token. It extends the
+new <code>AttributeImpl</code> class and therefore implements its abstract methods <code>clear(), copyTo(), equals(), hashCode()</code>.
+Now we need a TokenFilter that can set this new PartOfSpeechAttribute for each token. In this example we show a very naive filter\r
+that tags every word with a leading upper-case letter as a 'Noun' and all other words as 'Unknown'.\r
+<pre>\r
+  public static class PartOfSpeechTaggingFilter extends TokenFilter {\r
+    PartOfSpeechAttribute posAtt;\r
+    TermAttribute termAtt;\r
+    \r
+    protected PartOfSpeechTaggingFilter(TokenStream input) {\r
+      super(input);\r
+      posAtt = (PartOfSpeechAttribute) addAttribute(PartOfSpeechAttribute.class);\r
+      termAtt = (TermAttribute) addAttribute(TermAttribute.class);\r
+    }\r
+    \r
+    public boolean incrementToken() throws IOException {\r
+      if (!input.incrementToken()) {return false;}\r
+      posAtt.setPartOfSpeech(determinePOS(termAtt.termBuffer(), 0, termAtt.termLength()));\r
+      return true;\r
+    }\r
+    \r
+    // determine the part of speech for the given term\r
+    protected PartOfSpeech determinePOS(char[] term, int offset, int length) {\r
+      // naive implementation that tags every uppercased word as noun\r
+      if (length > 0 && Character.isUpperCase(term[0])) {\r
+        return PartOfSpeech.Noun;\r
+      }\r
+      return PartOfSpeech.Unknown;\r
+    }\r
+  }\r
+</pre>\r
+Just like the LengthFilter, this new filter accesses the attributes it needs in the constructor and
+stores references in instance variables. Notice how you only need to pass in the interface of the new
+Attribute; instantiating the correct implementing class is taken care of automatically.
+Now we need to add the filter to the chain:\r
+<pre>\r
+  public TokenStream tokenStream(String fieldName, Reader reader) {\r
+    TokenStream stream = new WhitespaceTokenizer(reader);\r
+    stream = new LengthFilter(stream, 3, Integer.MAX_VALUE);\r
+    stream = new PartOfSpeechTaggingFilter(stream);\r
+    return stream;\r
+  }\r
+</pre>\r
+Now let's look at the output:\r
+<pre>\r
+This\r
+demo\r
+the\r
+new\r
+TokenStream\r
+API\r
+</pre>\r
+Apparently it hasn't changed, which shows that adding a custom attribute to a TokenStream/Filter chain does not
+affect any existing consumers, simply because they don't know about the new Attribute. Now let's change the consumer
+to make use of the new PartOfSpeechAttribute and print it out:
+<pre>\r
+  public static void main(String[] args) throws IOException {\r
+    // text to tokenize\r
+    final String text = "This is a demo of the new TokenStream API";\r
+    \r
+    MyAnalyzer analyzer = new MyAnalyzer();\r
+    TokenStream stream = analyzer.tokenStream("field", new StringReader(text));\r
+    \r
+    // get the TermAttribute from the TokenStream\r
+    TermAttribute termAtt = (TermAttribute) stream.addAttribute(TermAttribute.class);\r
+    \r
+    // get the PartOfSpeechAttribute from the TokenStream\r
+    PartOfSpeechAttribute posAtt = (PartOfSpeechAttribute) stream.addAttribute(PartOfSpeechAttribute.class);\r
+    \r
+    stream.reset();\r
+\r
+    // print all tokens until stream is exhausted\r
+    while (stream.incrementToken()) {\r
+      System.out.println(termAtt.term() + ": " + posAtt.getPartOfSpeech());\r
+    }\r
+    \r
+    stream.end();\r
+    stream.close();\r
+  }\r
+</pre>\r
+The change that was made is to get the PartOfSpeechAttribute from the TokenStream and print out its contents in\r
+the while loop that consumes the stream. Here is the new output:\r
+<pre>\r
+This: Noun\r
+demo: Unknown\r
+the: Unknown\r
+new: Unknown\r
+TokenStream: Noun\r
+API: Noun\r
+</pre>\r
+Each word is now followed by its assigned PartOfSpeech tag. Of course this is naive 
+part-of-speech tagging. The word 'This' should not even be tagged as a noun; it is only capitalized because it
+is the first word of a sentence. Actually this is a good opportunity for an exercise. To practice the usage of the new
+API the reader could now write an Attribute and TokenFilter that records for each word whether it is the first token
+of a sentence. Then the PartOfSpeechTaggingFilter can make use of this knowledge and only tag capitalized words
+as nouns if they are not the first word of a sentence (we know, this is still not correct behavior, but hey, it's a good exercise). 
+As a small hint, this is how the new Attribute class could begin:
+<pre>\r
+  public class FirstTokenOfSentenceAttributeImpl extends AttributeImpl
+                   implements FirstTokenOfSentenceAttribute {
+    \r
+    private boolean firstToken;\r
+    \r
+    public void setFirstToken(boolean firstToken) {\r
+      this.firstToken = firstToken;\r
+    }\r
+    \r
+    public boolean getFirstToken() {\r
+      return firstToken;\r
+    }\r
+\r
+    public void clear() {\r
+      firstToken = false;\r
+    }\r
+\r
+  ...\r
+</pre>\r
+</body>\r
+</html>\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/PerFieldAnalyzerWrapper.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/PerFieldAnalyzerWrapper.cs
new file mode 100644 (file)
index 0000000..b4c36ca
--- /dev/null
@@ -0,0 +1,146 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> This analyzer is used to facilitate scenarios where different
+       /// fields require different analysis techniques.  Use {@link #addAnalyzer}
+       /// to add a non-default analyzer on a field name basis.
+       /// 
+       /// <p/>Example usage:
+       /// 
+       /// <pre>
+       /// PerFieldAnalyzerWrapper aWrapper =
+       /// new PerFieldAnalyzerWrapper(new StandardAnalyzer());
+       /// aWrapper.addAnalyzer("firstname", new KeywordAnalyzer());
+       /// aWrapper.addAnalyzer("lastname", new KeywordAnalyzer());
+       /// </pre>
+       /// 
+       /// <p/>In this example, StandardAnalyzer will be used for all fields except "firstname"
+       /// and "lastname", for which KeywordAnalyzer will be used.
+       /// 
+       /// <p/>A PerFieldAnalyzerWrapper can be used like any other analyzer, for both indexing
+       /// and query parsing.
+       /// </summary>
+       public class PerFieldAnalyzerWrapper:Analyzer
+       {
+               private Analyzer defaultAnalyzer;
+               private System.Collections.IDictionary analyzerMap = new System.Collections.Hashtable();
+               
+               
+               /// <summary> Constructs with default analyzer.
+               /// 
+               /// </summary>
+               /// <param name="defaultAnalyzer">Any fields not specifically
+               /// defined to use a different analyzer will use the one provided here.
+               /// </param>
+               public PerFieldAnalyzerWrapper(Analyzer defaultAnalyzer):this(defaultAnalyzer, null)
+               {
+               }
+               
+               /// <summary> Constructs with default analyzer and a map of analyzers to use for 
+               /// specific fields.
+               /// 
+               /// </summary>
+               /// <param name="defaultAnalyzer">Any fields not specifically
+               /// defined to use a different analyzer will use the one provided here.
+               /// </param>
+               /// <param name="fieldAnalyzers">a Map (String field name to the Analyzer) to be 
+               /// used for those fields 
+               /// </param>
+               public PerFieldAnalyzerWrapper(Analyzer defaultAnalyzer, System.Collections.IDictionary fieldAnalyzers)
+               {
+                       this.defaultAnalyzer = defaultAnalyzer;
+                       if (fieldAnalyzers != null)
+                       {
+                               System.Collections.ArrayList keys = new System.Collections.ArrayList(fieldAnalyzers.Keys);
+                               System.Collections.ArrayList values = new System.Collections.ArrayList(fieldAnalyzers.Values);
+
+                               for (int i=0; i < keys.Count; i++)
+                                       analyzerMap[keys[i]] = values[i];
+                       }
+                       SetOverridesTokenStreamMethod(typeof(PerFieldAnalyzerWrapper));
+               }
+               
+               
+               /// <summary> Defines an analyzer to use for the specified field.
+               /// 
+               /// </summary>
+               /// <param name="fieldName">field name requiring a non-default analyzer
+               /// </param>
+               /// <param name="analyzer">non-default analyzer to use for field
+               /// </param>
+               public virtual void  AddAnalyzer(System.String fieldName, Analyzer analyzer)
+               {
+                       analyzerMap[fieldName] = analyzer;
+               }
+               
+               public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
+               {
+                       Analyzer analyzer = (Analyzer) analyzerMap[fieldName];
+                       if (analyzer == null)
+                       {
+                               analyzer = defaultAnalyzer;
+                       }
+                       
+                       return analyzer.TokenStream(fieldName, reader);
+               }
+               
+               public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
+               {
+                       if (overridesTokenStreamMethod)
+                       {
+                               // LUCENE-1678: force fallback to tokenStream() if we
+                               // have been subclassed and that subclass overrides
+                               // tokenStream but not reusableTokenStream
+                               return TokenStream(fieldName, reader);
+                       }
+                       Analyzer analyzer = (Analyzer) analyzerMap[fieldName];
+                       if (analyzer == null)
+                               analyzer = defaultAnalyzer;
+                       
+                       return analyzer.ReusableTokenStream(fieldName, reader);
+               }
+               
+               /// <summary>Return the positionIncrementGap from the analyzer assigned to fieldName </summary>
+               public override int GetPositionIncrementGap(System.String fieldName)
+               {
+                       Analyzer analyzer = (Analyzer) analyzerMap[fieldName];
+                       if (analyzer == null)
+                               analyzer = defaultAnalyzer;
+                       return analyzer.GetPositionIncrementGap(fieldName);
+               }
+
+        /// <summary> Return the offsetGap from the analyzer assigned to field </summary>
+        public override int GetOffsetGap(Mono.Lucene.Net.Documents.Fieldable field)
+        {
+            Analyzer analyzer = (Analyzer)analyzerMap[field.Name()];
+            if (analyzer == null)
+                analyzer = defaultAnalyzer;
+            return analyzer.GetOffsetGap(field);
+        }
+               
+               public override System.String ToString()
+               {
+                       // {{Aroush-2.9}} will 'analyzerMap.ToString()' work in the same way as Java's java.util.HashMap.toString()? 
+                       return "PerFieldAnalyzerWrapper(" + analyzerMap.ToString() + ", default=" + defaultAnalyzer + ")";
+               }
+       }
+}
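A minimal usage sketch of the wrapper above, mirroring the example in its class comment (StandardAnalyzer and
KeywordAnalyzer are assumed to be available elsewhere in the port; the field names are illustrative):

    PerFieldAnalyzerWrapper aWrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer());
    aWrapper.AddAnalyzer("firstname", new KeywordAnalyzer());
    aWrapper.AddAnalyzer("lastname", new KeywordAnalyzer());

    // Fields without a registered analyzer fall back to the default analyzer:
    TokenStream ts = aWrapper.TokenStream("body", new System.IO.StringReader("some free text"));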
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/PorterStemFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/PorterStemFilter.cs
new file mode 100644 (file)
index 0000000..cd302c7
--- /dev/null
@@ -0,0 +1,64 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary>Transforms the token stream as per the Porter stemming algorithm.
+       /// Note: the input to the stemming filter must already be in lower case,
+       /// so you will need to use LowerCaseFilter or LowerCaseTokenizer farther
+       /// down the Tokenizer chain in order for this to work properly!
+       /// <p/>
+       /// To use this filter with other analyzers, you'll want to write an
+       /// Analyzer class that sets up the TokenStream chain as you want it.
+       /// To use this with LowerCaseTokenizer, for example, you'd write an
+       /// analyzer like this:
+       /// <p/>
+       /// <PRE>
+       /// class MyAnalyzer extends Analyzer {
+       /// public final TokenStream tokenStream(String fieldName, Reader reader) {
+       /// return new PorterStemFilter(new LowerCaseTokenizer(reader));
+       /// }
+       /// }
+       /// </PRE>
+       /// </summary>
+       public sealed class PorterStemFilter:TokenFilter
+       {
+               private PorterStemmer stemmer;
+               private TermAttribute termAtt;
+               
+               public PorterStemFilter(TokenStream in_Renamed):base(in_Renamed)
+               {
+                       stemmer = new PorterStemmer();
+                       termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
+               }
+               
+               public override bool IncrementToken()
+               {
+                       if (!input.IncrementToken())
+                               return false;
+                       
+                       if (stemmer.Stem(termAtt.TermBuffer(), 0, termAtt.TermLength()))
+                               termAtt.SetTermBuffer(stemmer.GetResultBuffer(), 0, stemmer.GetResultLength());
+                       return true;
+               }
+       }
+}
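As the class comment stresses, the stemmer assumes already lower-cased input. A C# analogue of the Java snippet
in that comment (the analyzer name is made up; the TokenStream override signature matches SimpleAnalyzer.cs in
this tree):

    public class MyStemmingAnalyzer : Mono.Lucene.Net.Analysis.Analyzer
    {
        public override Mono.Lucene.Net.Analysis.TokenStream TokenStream(
            System.String fieldName, System.IO.TextReader reader)
        {
            // Lower-case first, then stem.
            return new Mono.Lucene.Net.Analysis.PorterStemFilter(
                new Mono.Lucene.Net.Analysis.LowerCaseTokenizer(reader));
        }
    }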
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/PorterStemmer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/PorterStemmer.cs
new file mode 100644 (file)
index 0000000..64b5eb5
--- /dev/null
@@ -0,0 +1,746 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+
+Porter stemmer in Java. The original paper is in
+
+Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14,
+no. 3, pp 130-137,
+
+See also http://www.tartarus.org/~martin/PorterStemmer/index.html
+
+Bug 1 (reported by Gonzalo Parra 16/10/99) fixed as marked below.
+The words 'aed', 'eed', 'oed' leave k at 'a' for step 3, and b[k-1]
+is then outside the bounds of b.
+
+Similarly,
+
+Bug 2 (reported by Steve Dyrdahl 22/2/00) fixed as marked below.
+'ion' by itself leaves j = -1 in the test for 'ion' in step 5, and
+b[j] is then outside the bounds of b.
+
+Release 3.
+
+[ This version is derived from Release 3, modified by Brian Goetz to
+optimize for fewer object creations.  ]
+*/
+using System;
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> 
+       /// Stemmer, implementing the Porter Stemming Algorithm
+       /// 
+       /// The Stemmer class transforms a word into its root form.  The input
+       /// word can be provided a character at time (by calling add()), or at once
+       /// by calling one of the various stem(something) methods.
+       /// </summary>
+       
+       class PorterStemmer
+       {
+               private char[] b;
+               private int i, j, k, k0;
+               private bool dirty = false;
+               private const int INC = 50; /* unit of size whereby b is increased */
+               private const int EXTRA = 1;
+               
+               public PorterStemmer()
+               {
+                       b = new char[INC];
+                       i = 0;
+               }
+               
+               /// <summary> reset() resets the stemmer so it can stem another word.  If you invoke
+               /// the stemmer by calling add(char) and then stem(), you must call reset()
+               /// before starting another word.
+               /// </summary>
+               public virtual void  Reset()
+               {
+                       i = 0; dirty = false;
+               }
+               
+               /// <summary> Add a character to the word being stemmed.  When you are finished
+               /// adding characters, you can call stem(void) to process the word.
+               /// </summary>
+               public virtual void  Add(char ch)
+               {
+                       if (b.Length <= i + EXTRA)
+                       {
+                               char[] new_b = new char[b.Length + INC];
+                               Array.Copy(b, 0, new_b, 0, b.Length);
+                               b = new_b;
+                       }
+                       b[i++] = ch;
+               }
+               
+               /// <summary> After a word has been stemmed, it can be retrieved by toString(),
+               /// or a reference to the internal buffer can be retrieved by getResultBuffer
+               /// and getResultLength (which is generally more efficient.)
+               /// </summary>
+               public override System.String ToString()
+               {
+                       return new System.String(b, 0, i);
+               }
+               
+               /// <summary> Returns the length of the word resulting from the stemming process.</summary>
+               public virtual int GetResultLength()
+               {
+                       return i;
+               }
+               
+               /// <summary> Returns a reference to a character buffer containing the results of
+               /// the stemming process.  You also need to consult getResultLength()
+               /// to determine the length of the result.
+               /// </summary>
+               public virtual char[] GetResultBuffer()
+               {
+                       return b;
+               }
+               
+               /* cons(i) is true <=> b[i] is a consonant. */
+               
+               private bool Cons(int i)
+               {
+                       switch (b[i])
+                       {
+                               
+                               case 'a': 
+                               case 'e': 
+                               case 'i': 
+                               case 'o': 
+                               case 'u': 
+                                       return false;
+                               
+                               case 'y': 
+                                       return (i == k0)?true:!Cons(i - 1);
+                               
+                               default: 
+                                       return true;
+                               
+                       }
+               }
+               
+               /* m() measures the number of consonant sequences between k0 and j. if c is
+               a consonant sequence and v a vowel sequence, and <..> indicates arbitrary
+               presence,
+               
+               <c><v>       gives 0
+               <c>vc<v>     gives 1
+               <c>vcvc<v>   gives 2
+               <c>vcvcvc<v> gives 3
+               ....
+               */
+               
+               private int M()
+               {
+                       int n = 0;
+                       int i = k0;
+                       while (true)
+                       {
+                               if (i > j)
+                                       return n;
+                               if (!Cons(i))
+                                       break;
+                               i++;
+                       }
+                       i++;
+                       while (true)
+                       {
+                               while (true)
+                               {
+                                       if (i > j)
+                                               return n;
+                                       if (Cons(i))
+                                               break;
+                                       i++;
+                               }
+                               i++;
+                               n++;
+                               while (true)
+                               {
+                                       if (i > j)
+                                               return n;
+                                       if (!Cons(i))
+                                               break;
+                                       i++;
+                               }
+                               i++;
+                       }
+               }
+               
+               /* vowelinstem() is true <=> k0,...j contains a vowel */
+               
+               private bool Vowelinstem()
+               {
+                       int i;
+                       for (i = k0; i <= j; i++)
+                               if (!Cons(i))
+                                       return true;
+                       return false;
+               }
+               
+               /* doublec(j) is true <=> j,(j-1) contain a double consonant. */
+               
+               private bool Doublec(int j)
+               {
+                       if (j < k0 + 1)
+                               return false;
+                       if (b[j] != b[j - 1])
+                               return false;
+                       return Cons(j);
+               }
+               
+               /* cvc(i) is true <=> i-2,i-1,i has the form consonant - vowel - consonant
+               and also if the second c is not w,x or y. this is used when trying to
+               restore an e at the end of a short word. e.g.
+               
+               cav(e), lov(e), hop(e), crim(e), but
+               snow, box, tray.
+               
+               */
+               
+               private bool Cvc(int i)
+               {
+                       if (i < k0 + 2 || !Cons(i) || Cons(i - 1) || !Cons(i - 2))
+                               return false;
+                       else
+                       {
+                               int ch = b[i];
+                               if (ch == 'w' || ch == 'x' || ch == 'y')
+                                       return false;
+                       }
+                       return true;
+               }
+               
+               private bool Ends(System.String s)
+               {
+                       int l = s.Length;
+                       int o = k - l + 1;
+                       if (o < k0)
+                               return false;
+                       for (int i = 0; i < l; i++)
+                               if (b[o + i] != s[i])
+                                       return false;
+                       j = k - l;
+                       return true;
+               }
+               
+               /* setto(s) sets (j+1),...k to the characters in the string s, readjusting
+               k. */
+               
+               internal virtual void  Setto(System.String s)
+               {
+                       int l = s.Length;
+                       int o = j + 1;
+                       for (int i = 0; i < l; i++)
+                               b[o + i] = s[i];
+                       k = j + l;
+                       dirty = true;
+               }
+               
+               /* r(s) is used further down. */
+               
+               internal virtual void  R(System.String s)
+               {
+                       if (M() > 0)
+                               Setto(s);
+               }
+               
+               /* step1() gets rid of plurals and -ed or -ing. e.g.
+               
+               caresses  ->  caress
+               ponies    ->  poni
+               ties      ->  ti
+               caress    ->  caress
+               cats      ->  cat
+               
+               feed      ->  feed
+               agreed    ->  agree
+               disabled  ->  disable
+               
+               matting   ->  mat
+               mating    ->  mate
+               meeting   ->  meet
+               milling   ->  mill
+               messing   ->  mess
+               
+               meetings  ->  meet
+               
+               */
+               
+               private void  Step1()
+               {
+                       if (b[k] == 's')
+                       {
+                               if (Ends("sses"))
+                                       k -= 2;
+                               else if (Ends("ies"))
+                                       Setto("i");
+                               else if (b[k - 1] != 's')
+                                       k--;
+                       }
+                       if (Ends("eed"))
+                       {
+                               if (M() > 0)
+                                       k--;
+                       }
+                       else if ((Ends("ed") || Ends("ing")) && Vowelinstem())
+                       {
+                               k = j;
+                               if (Ends("at"))
+                                       Setto("ate");
+                               else if (Ends("bl"))
+                                       Setto("ble");
+                               else if (Ends("iz"))
+                                       Setto("ize");
+                               else if (Doublec(k))
+                               {
+                                       int ch = b[k--];
+                                       if (ch == 'l' || ch == 's' || ch == 'z')
+                                               k++;
+                               }
+                               else if (M() == 1 && Cvc(k))
+                                       Setto("e");
+                       }
+               }
+               
+               /* step2() turns terminal y to i when there is another vowel in the stem. */
+               
+               private void  Step2()
+               {
+                       if (Ends("y") && Vowelinstem())
+                       {
+                               b[k] = 'i';
+                               dirty = true;
+                       }
+               }
+               
+               /* step3() maps double suffices to single ones. so -ization ( = -ize plus
+               -ation) maps to -ize etc. note that the string before the suffix must give
+               m() > 0. */
+               
+               private void  Step3()
+               {
+                       if (k == k0)
+                               return ; /* For Bug 1 */
+                       switch (b[k - 1])
+                       {
+                               
+                               case 'a': 
+                                       if (Ends("ational"))
+                                       {
+                                               R("ate"); break;
+                                       }
+                                       if (Ends("tional"))
+                                       {
+                                               R("tion"); break;
+                                       }
+                                       break;
+                               
+                               case 'c': 
+                                       if (Ends("enci"))
+                                       {
+                                               R("ence"); break;
+                                       }
+                                       if (Ends("anci"))
+                                       {
+                                               R("ance"); break;
+                                       }
+                                       break;
+                               
+                               case 'e': 
+                                       if (Ends("izer"))
+                                       {
+                                               R("ize"); break;
+                                       }
+                                       break;
+                               
+                               case 'l': 
+                                       if (Ends("bli"))
+                                       {
+                                               R("ble"); break;
+                                       }
+                                       if (Ends("alli"))
+                                       {
+                                               R("al"); break;
+                                       }
+                                       if (Ends("entli"))
+                                       {
+                                               R("ent"); break;
+                                       }
+                                       if (Ends("eli"))
+                                       {
+                                               R("e"); break;
+                                       }
+                                       if (Ends("ousli"))
+                                       {
+                                               R("ous"); break;
+                                       }
+                                       break;
+                               
+                               case 'o': 
+                                       if (Ends("ization"))
+                                       {
+                                               R("ize"); break;
+                                       }
+                                       if (Ends("ation"))
+                                       {
+                                               R("ate"); break;
+                                       }
+                                       if (Ends("ator"))
+                                       {
+                                               R("ate"); break;
+                                       }
+                                       break;
+                               
+                               case 's': 
+                                       if (Ends("alism"))
+                                       {
+                                               R("al"); break;
+                                       }
+                                       if (Ends("iveness"))
+                                       {
+                                               R("ive"); break;
+                                       }
+                                       if (Ends("fulness"))
+                                       {
+                                               R("ful"); break;
+                                       }
+                                       if (Ends("ousness"))
+                                       {
+                                               R("ous"); break;
+                                       }
+                                       break;
+                               
+                               case 't': 
+                                       if (Ends("aliti"))
+                                       {
+                                               R("al"); break;
+                                       }
+                                       if (Ends("iviti"))
+                                       {
+                                               R("ive"); break;
+                                       }
+                                       if (Ends("biliti"))
+                                       {
+                                               R("ble"); break;
+                                       }
+                                       break;
+                               
+                               case 'g': 
+                                       if (Ends("logi"))
+                                       {
+                                               R("log"); break;
+                                       }
+                                       break;
+                               }
+               }
+               
+               /* step4() deals with -ic-, -full, -ness etc. similar strategy to step3. */
+               
+               private void  Step4()
+               {
+                       switch (b[k])
+                       {
+                               
+                               case 'e': 
+                                       if (Ends("icate"))
+                                       {
+                                               R("ic"); break;
+                                       }
+                                       if (Ends("ative"))
+                                       {
+                                               R(""); break;
+                                       }
+                                       if (Ends("alize"))
+                                       {
+                                               R("al"); break;
+                                       }
+                                       break;
+                               
+                               case 'i': 
+                                       if (Ends("iciti"))
+                                       {
+                                               R("ic"); break;
+                                       }
+                                       break;
+                               
+                               case 'l': 
+                                       if (Ends("ical"))
+                                       {
+                                               R("ic"); break;
+                                       }
+                                       if (Ends("ful"))
+                                       {
+                                               R(""); break;
+                                       }
+                                       break;
+                               
+                               case 's': 
+                                       if (Ends("ness"))
+                                       {
+                                               R(""); break;
+                                       }
+                                       break;
+                               }
+               }
+               
+               /* step5() takes off -ant, -ence etc., in context <c>vcvc<v>. */
+               
+               private void  Step5()
+               {
+                       if (k == k0)
+                               return ; /* for Bug 1 */
+                       switch (b[k - 1])
+                       {
+                               
+                               case 'a': 
+                                       if (Ends("al"))
+                                               break;
+                                       return ;
+                               
+                               case 'c': 
+                                       if (Ends("ance"))
+                                               break;
+                                       if (Ends("ence"))
+                                               break;
+                                       return ;
+                               
+                               case 'e': 
+                                       if (Ends("er"))
+                                               break;
+                                       return ;
+                               
+                               case 'i': 
+                                       if (Ends("ic"))
+                                               break;
+                                       return ;
+                               
+                               case 'l': 
+                                       if (Ends("able"))
+                                               break;
+                                       if (Ends("ible"))
+                                               break;
+                                       return ;
+                               
+                               case 'n': 
+                                       if (Ends("ant"))
+                                               break;
+                                       if (Ends("ement"))
+                                               break;
+                                       if (Ends("ment"))
+                                               break;
+                                       /* element etc. not stripped before the m */
+                                       if (Ends("ent"))
+                                               break;
+                                       return ;
+                               
+                               case 'o': 
+                                       if (Ends("ion") && j >= 0 && (b[j] == 's' || b[j] == 't'))
+                                               break;
+                                       /* j >= 0 fixes Bug 2 */
+                                       if (Ends("ou"))
+                                               break;
+                                       return ;
+                                       /* takes care of -ous */
+                               
+                               case 's': 
+                                       if (Ends("ism"))
+                                               break;
+                                       return ;
+                               
+                               case 't': 
+                                       if (Ends("ate"))
+                                               break;
+                                       if (Ends("iti"))
+                                               break;
+                                       return ;
+                               
+                               case 'u': 
+                                       if (Ends("ous"))
+                                               break;
+                                       return ;
+                               
+                               case 'v': 
+                                       if (Ends("ive"))
+                                               break;
+                                       return ;
+                               
+                               case 'z': 
+                                       if (Ends("ize"))
+                                               break;
+                                       return ;
+                               
+                               default: 
+                                       return ;
+                               
+                       }
+                       if (M() > 1)
+                               k = j;
+               }
+               
+               /* step6() removes a final -e if m() > 1. */
+               
+               private void  Step6()
+               {
+                       j = k;
+                       if (b[k] == 'e')
+                       {
+                               int a = M();
+                               if (a > 1 || a == 1 && !Cvc(k - 1))
+                                       k--;
+                       }
+                       if (b[k] == 'l' && Doublec(k) && M() > 1)
+                               k--;
+               }
+               
+               
+               /// <summary> Stem a word provided as a String.  Returns the result as a String.</summary>
+               public virtual System.String Stem(System.String s)
+               {
+                       if (Stem(s.ToCharArray(), s.Length))
+                       {
+                               return ToString();
+                       }
+                       else
+                               return s;
+               }
+               
+               /// <summary>Stem a word contained in a char[].  Returns true if the stemming process
+               /// resulted in a word different from the input.  You can retrieve the
+               /// result with getResultLength()/getResultBuffer() or toString().
+               /// </summary>
+               public virtual bool Stem(char[] word)
+               {
+                       return Stem(word, word.Length);
+               }
+               
+               /// <summary>Stem a word contained in a portion of a char[] array.  Returns
+               /// true if the stemming process resulted in a word different from
+               /// the input.  You can retrieve the result with
+               /// getResultLength()/getResultBuffer() or toString().
+               /// </summary>
+               public virtual bool Stem(char[] wordBuffer, int offset, int wordLen)
+               {
+                       Reset();
+                       if (b.Length < wordLen)
+                       {
+                               char[] new_b = new char[wordLen + EXTRA];
+                               b = new_b;
+                       }
+                       Array.Copy(wordBuffer, offset, b, 0, wordLen);
+                       i = wordLen;
+                       return Stem(0);
+               }
+               
+               /// <summary>Stem a word contained in a leading portion of a char[] array.
+               /// Returns true if the stemming process resulted in a word different
+               /// from the input.  You can retrieve the result with
+               /// getResultLength()/getResultBuffer() or toString().
+               /// </summary>
+               public virtual bool Stem(char[] word, int wordLen)
+               {
+                       return Stem(word, 0, wordLen);
+               }
+               
+               /// <summary>Stem the word placed into the Stemmer buffer through calls to add().
+               /// Returns true if the stemming process resulted in a word different
+               /// from the input.  You can retrieve the result with
+               /// getResultLength()/getResultBuffer() or toString().
+               /// </summary>
+               public virtual bool Stem()
+               {
+                       return Stem(0);
+               }
+               
+               public virtual bool Stem(int i0)
+               {
+                       k = i - 1;
+                       k0 = i0;
+                       if (k > k0 + 1)
+                       {
+                               Step1(); Step2(); Step3(); Step4(); Step5(); Step6();
+                       }
+                       // Also, a word is considered dirty if we lopped off letters
+                       // Thanks to Ifigenia Vairelles for pointing this out.
+                       if (i != k + 1)
+                               dirty = true;
+                       i = k + 1;
+                       return dirty;
+               }
+               
+               /// <summary>Test program for demonstrating the Stemmer.  It reads a file and
+               /// stems each word, writing the result to standard out.
+               /// Usage: Stemmer file-name
+               /// </summary>
+               [STAThread]
+               public static void  Main(System.String[] args)
+               {
+                       PorterStemmer s = new PorterStemmer();
+                       
+                       for (int i = 0; i < args.Length; i++)
+                       {
+                               try
+                               {
+                                       System.IO.Stream in_Renamed = new System.IO.FileStream(args[i], System.IO.FileMode.Open, System.IO.FileAccess.Read);
+                                       byte[] buffer = new byte[1024];
+                                       int bufferLen, offset, ch;
+                                       
+                                       bufferLen = in_Renamed.Read(buffer, 0, buffer.Length);
+                                       offset = 0;
+                                       s.Reset();
+                                       
+                                       while (true)
+                                       {
+                                               if (offset < bufferLen)
+                                                       ch = buffer[offset++];
+                                               else
+                                               {
+                                                       bufferLen = in_Renamed.Read(buffer, 0, buffer.Length);
+                                                       offset = 0;
+                                                       if (bufferLen < 0)
+                                                               ch = - 1;
+                                                       else
+                                                               ch = buffer[offset++];
+                                               }
+                                               
+                                               if (System.Char.IsLetter((char) ch))
+                                               {
+                                                       s.Add(System.Char.ToLower((char) ch));
+                                               }
+                                               else
+                                               {
+                                                       s.Stem();
+                                                       System.Console.Out.Write(s.ToString());
+                                                       s.Reset();
+                                                       if (ch < 0)
+                                                               break;
+                                                       else
+                                                       {
+                                                               System.Console.Out.Write((char) ch);
+                                                       }
+                                               }
+                                       }
+                                       
+                                       in_Renamed.Close();
+                               }
+                               catch (System.IO.IOException e)
+                               {
+                                       System.Console.Out.WriteLine("error reading " + args[i] + ": " + e.Message);
+                               }
+                       }
+               }
+       }
+}
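A quick sketch of driving the stemmer directly, reusing the sample words documented in step1() above. Stem(String)
resets the internal state on each call, so one instance can stem several words in a row (note the class has no
access modifier and is therefore internal to the assembly):

    PorterStemmer s = new PorterStemmer();
    System.Console.WriteLine(s.Stem("caresses")); // caress
    System.Console.WriteLine(s.Stem("ponies"));   // poni
    System.Console.WriteLine(s.Stem("meetings")); // meet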
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/SimpleAnalyzer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/SimpleAnalyzer.cs
new file mode 100644 (file)
index 0000000..54cd197
--- /dev/null
@@ -0,0 +1,47 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary>An {@link Analyzer} that filters {@link LetterTokenizer} 
+       /// with {@link LowerCaseFilter} 
+       /// </summary>
+       
+       public sealed class SimpleAnalyzer:Analyzer
+       {
+               public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
+               {
+                       return new LowerCaseTokenizer(reader);
+               }
+               
+               public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
+               {
+                       Tokenizer tokenizer = (Tokenizer) GetPreviousTokenStream();
+                       if (tokenizer == null)
+                       {
+                               tokenizer = new LowerCaseTokenizer(reader);
+                               SetPreviousTokenStream(tokenizer);
+                       }
+                       else
+                               tokenizer.Reset(reader);
+                       return tokenizer;
+               }
+       }
+}
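
A minimal usage sketch for the analyzer above (illustrative, not part of the patch; the field name "f" is arbitrary and the consumption pattern follows the attribute-based API used elsewhere in this import):

    using System;
    using System.IO;
    using Mono.Lucene.Net.Analysis;
    using Mono.Lucene.Net.Analysis.Tokenattributes;

    class SimpleAnalyzerDemo
    {
        static void Main()
        {
            Analyzer analyzer = new SimpleAnalyzer();
            TokenStream ts = analyzer.TokenStream("f",
                new StringReader("The QUICK-brown fox, 42 times"));
            TermAttribute term = (TermAttribute) ts.AddAttribute(typeof(TermAttribute));
            while (ts.IncrementToken())
                Console.WriteLine(term.Term());
            // Maximal letter runs, lower-cased: the / quick / brown / fox / times
            // ("42" is dropped: LetterTokenizer keeps letters only).
        }
    }
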
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/SinkTokenizer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/SinkTokenizer.cs
new file mode 100644 (file)
index 0000000..908a5ef
--- /dev/null
@@ -0,0 +1,123 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       
+       /// <summary> A SinkTokenizer can be used to cache Tokens for use in an Analyzer.
+       /// <p/>
+       /// WARNING: {@link TeeTokenFilter} and {@link SinkTokenizer} only work with the old TokenStream API.
+       /// If you switch to the new API, you need to use {@link TeeSinkTokenFilter} instead, which offers 
+       /// the same functionality.
+       /// </summary>
+       /// <seealso cref="TeeTokenFilter">
+       /// </seealso>
+       /// <deprecated> Use {@link TeeSinkTokenFilter} instead
+       /// 
+       /// 
+       /// </deprecated>
+    [Obsolete("Use TeeSinkTokenFilter instead")]
+       public class SinkTokenizer:Tokenizer
+       {
+               protected internal System.Collections.IList lst = new System.Collections.ArrayList();
+               protected internal System.Collections.IEnumerator iter;
+               
+               public SinkTokenizer(System.Collections.IList input)
+               {
+                       this.lst = input;
+                       if (this.lst == null)
+                               this.lst = new System.Collections.ArrayList();
+               }
+               
+               public SinkTokenizer()
+               {
+                       this.lst = new System.Collections.ArrayList();
+               }
+               
+               public SinkTokenizer(int initCap)
+               {
+                       this.lst = new System.Collections.ArrayList(initCap);
+               }
+               
+               /// <summary> Get the tokens in the internal List.
+               /// <p/>
+               /// WARNING: Adding tokens to this list requires the {@link #Reset()} method to be called in order for them
+               /// to be made available.  Also, this Tokenizer does nothing to protect against {@link java.util.ConcurrentModificationException}s
+               /// in the case of adds happening while {@link #Next(Mono.Lucene.Net.Analysis.Token)} is being called.
+               /// <p/>
+               /// WARNING: Since this SinkTokenizer can be reset and the cached tokens made available again, do not modify them. Modify clones instead.
+               /// 
+               /// </summary>
+               /// <returns> A List of {@link Mono.Lucene.Net.Analysis.Token}s
+               /// </returns>
+               public virtual System.Collections.IList GetTokens()
+               {
+                       return lst;
+               }
+               
+               /// <summary> Returns the next token out of the list of cached tokens</summary>
+               /// <returns> The next {@link Mono.Lucene.Net.Analysis.Token} in the Sink.
+               /// </returns>
+               /// <throws>  IOException </throws>
+        [Obsolete("Mono.Lucene.Net-2.9.1. This method overrides obsolete member Mono.Lucene.Net.Analysis.TokenStream.Next(Mono.Lucene.Net.Analysis.Token)")]
+               public override Token Next(Token reusableToken)
+               {
+                       System.Diagnostics.Debug.Assert(reusableToken != null);
+                       if (iter == null)
+                               iter = lst.GetEnumerator();
+                       // Since this TokenStream can be reset we have to maintain the tokens as immutable
+                       if (iter.MoveNext())
+                       {
+                               Token nextToken = (Token) iter.Current;
+                               return (Token) nextToken.Clone();
+                       }
+                       return null;
+               }
+               
+               /// <summary> Override this method to cache only certain tokens, or new tokens based
+               /// on the old tokens.
+               /// 
+               /// </summary>
+               /// <param name="t">The {@link Mono.Lucene.Net.Analysis.Token} to add to the sink
+               /// </param>
+               public virtual void  Add(Token t)
+               {
+                       if (t == null)
+                               return ;
+                       lst.Add((Token) t.Clone());
+               }
+               
+               public override void  Close()
+               {
+                       //nothing to close
+                       input = null;
+                       lst = null;
+               }
+               
+               /// <summary> Reset the internal data structures to the start at the front of the list of tokens.  Should be called
+               /// if tokens were added to the list after an invocation of {@link #Next(Token)}
+               /// </summary>
+               /// <throws>  IOException </throws>
+               public override void  Reset()
+               {
+                       iter = lst.GetEnumerator();
+               }
+       }
+}
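
A tee/sink sketch for the class above, using the old TokenStream API as its documentation requires; TeeTokenFilter and WhitespaceTokenizer are assumed to be companion classes imported elsewhere in this commit:

    using System;
    using System.IO;
    using Mono.Lucene.Net.Analysis;

    class SinkDemo
    {
        static void Main()
        {
            SinkTokenizer sink = new SinkTokenizer();
            TokenStream tee = new TeeTokenFilter(
                new WhitespaceTokenizer(new StringReader("alpha beta gamma")), sink);

            // First pass: each token flows through the tee and a clone is cached in the sink.
            for (Token t = tee.Next(new Token()); t != null; t = tee.Next(t))
            {
                // consume the primary stream
            }

            // Second pass: replay the cached tokens from the sink.
            sink.Reset();
            for (Token t = sink.Next(new Token()); t != null; t = sink.Next(t))
                Console.WriteLine(t.Term());  // alpha / beta / gamma
        }
    }
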
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/.gitattributes b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/.gitattributes
new file mode 100644 (file)
index 0000000..469cccd
--- /dev/null
@@ -0,0 +1,6 @@
+/Package.html -crlf
+/StandardAnalyzer.cs -crlf
+/StandardFilter.cs -crlf
+/StandardTokenizer.cs -crlf
+/StandardTokenizerImpl.cs -crlf
+/StandardTokenizerImpl.jflex -crlf
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/Package.html b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/Package.html
new file mode 100644 (file)
index 0000000..02b2f16
--- /dev/null
@@ -0,0 +1,25 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<html>
+<head>
+   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+</head>
+<body>
+A fast grammar-based tokenizer constructed with JFlex.
+</body>
+</html>
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs
new file mode 100644 (file)
index 0000000..4f96aef
--- /dev/null
@@ -0,0 +1,435 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Mono.Lucene.Net.Analysis;
+using Version = Mono.Lucene.Net.Util.Version;
+
+namespace Mono.Lucene.Net.Analysis.Standard
+{
+       
+       /// <summary> Filters {@link StandardTokenizer} with {@link StandardFilter},
+       /// {@link LowerCaseFilter} and {@link StopFilter}, using a list of English stop
+       /// words.
+       /// 
+       /// <a name="version"/>
+       /// <p/>
+       /// You must specify the required {@link Version} compatibility when creating
+       /// StandardAnalyzer:
+       /// <ul>
+       /// <li>As of 2.9, StopFilter preserves position increments</li>
+       /// <li>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
+       /// <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a></li>
+       /// </ul>
+       /// 
+       /// </summary>
+       /// <version>  $Id: StandardAnalyzer.java 829134 2009-10-23 17:18:53Z mikemccand $
+       /// </version>
+       public class StandardAnalyzer : Analyzer
+       {
+               private System.Collections.Hashtable stopSet;
+               
+               /// <summary> Specifies whether deprecated acronyms should be replaced with HOST type.
+               /// This is false by default to support backward compatibility.
+               /// 
+               /// </summary>
+               /// <deprecated> this should be removed in the next release (3.0).
+               /// 
+               /// See https://issues.apache.org/jira/browse/LUCENE-1068
+               /// </deprecated>
+        [Obsolete("this should be removed in the next release (3.0).")]
+               private bool replaceInvalidAcronym = defaultReplaceInvalidAcronym;
+               
+               private static bool defaultReplaceInvalidAcronym;
+               private bool enableStopPositionIncrements;
+               
+               // @deprecated
+        [Obsolete]
+               private bool useDefaultStopPositionIncrements;
+               
+               /// <summary> </summary>
+               /// <returns> true if new instances of StandardTokenizer will
+               /// replace mischaracterized acronyms
+               /// 
+               /// See https://issues.apache.org/jira/browse/LUCENE-1068
+               /// </returns>
+               /// <deprecated> This will be removed (hardwired to true) in 3.0
+               /// </deprecated>
+        [Obsolete("This will be removed (hardwired to true) in 3.0")]
+               public static bool GetDefaultReplaceInvalidAcronym()
+               {
+                       return defaultReplaceInvalidAcronym;
+               }
+               
+               /// <summary> </summary>
+               /// <param name="replaceInvalidAcronym">Set to true to have new
+               /// instances of StandardTokenizer replace mischaracterized
+               /// acronyms by default.  Set to false to preserve the
+               /// previous (before 2.4) buggy behavior.  Alternatively,
+               /// set the system property
+               /// Mono.Lucene.Net.Analysis.Standard.StandardAnalyzer.replaceInvalidAcronym
+               /// to false.
+               /// 
+               /// See https://issues.apache.org/jira/browse/LUCENE-1068
+               /// </param>
+               /// <deprecated> This will be removed (hardwired to true) in 3.0
+               /// </deprecated>
+        [Obsolete("This will be removed (hardwired to true) in 3.0")]
+               public static void  SetDefaultReplaceInvalidAcronym(bool replaceInvalidAcronym)
+               {
+                       defaultReplaceInvalidAcronym = replaceInvalidAcronym;
+               }
+               
+               
+               /// <summary>An array containing some common English words that are usually not
+               /// useful for searching. 
+               /// </summary>
+               /// <deprecated> Use {@link #STOP_WORDS_SET} instead 
+               /// </deprecated>
+        [Obsolete("Use STOP_WORDS_SET instead ")]
+               public static readonly System.String[] STOP_WORDS;
+               
+               /// <summary>An unmodifiable set containing some common English words that are usually not
+               /// useful for searching. 
+               /// </summary>
+               public static readonly System.Collections.Hashtable STOP_WORDS_SET;
+               
+               /// <summary>Builds an analyzer with the default stop words ({@link
+               /// #STOP_WORDS_SET}).
+               /// </summary>
+               /// <deprecated> Use {@link #StandardAnalyzer(Version)} instead. 
+               /// </deprecated>
+        [Obsolete("Use StandardAnalyzer(Version) instead")]
+               public StandardAnalyzer():this(Version.LUCENE_24, STOP_WORDS_SET)
+               {
+               }
+               
+               /// <summary>Builds an analyzer with the default stop words ({@link
+               /// #STOP_WORDS}).
+               /// </summary>
+               /// <param name="matchVersion">Lucene version to match. See {@link
+               /// <a href="#version">above</a>}
+               /// </param>
+               public StandardAnalyzer(Version matchVersion):this(matchVersion, STOP_WORDS_SET)
+               {
+               }
+               
+               /// <summary>Builds an analyzer with the given stop words.</summary>
+               /// <deprecated> Use {@link #StandardAnalyzer(Version, Set)}
+               /// instead 
+               /// </deprecated>
+        [Obsolete("Use StandardAnalyzer(Version, Set) instead")]
+               public StandardAnalyzer(System.Collections.Hashtable stopWords):this(Version.LUCENE_24, stopWords)
+               {
+               }
+               
+               /// <summary>Builds an analyzer with the given stop words.</summary>
+               /// <param name="matchVersion">Lucene version to match. See {@link
+               /// <a href="#version">above</a>}
+               /// </param>
+               /// <param name="stopWords">stop words 
+               /// </param>
+               public StandardAnalyzer(Version matchVersion, System.Collections.Hashtable stopWords)
+               {
+                       stopSet = stopWords;
+                       Init(matchVersion);
+               }
+               
+               /// <summary>Builds an analyzer with the given stop words.</summary>
+               /// <deprecated> Use {@link #StandardAnalyzer(Version, Set)} instead 
+               /// </deprecated>
+        [Obsolete("Use StandardAnalyzer(Version, Set) instead")]
+               public StandardAnalyzer(System.String[] stopWords):this(Version.LUCENE_24, StopFilter.MakeStopSet(stopWords))
+               {
+               }
+               
+               /// <summary>Builds an analyzer with the stop words from the given file.</summary>
+               /// <seealso cref="WordlistLoader.GetWordSet(File)">
+               /// </seealso>
+               /// <deprecated> Use {@link #StandardAnalyzer(Version, File)}
+               /// instead
+               /// </deprecated>
+        [Obsolete("Use StandardAnalyzer(Version, File) instead")]
+               public StandardAnalyzer(System.IO.FileInfo stopwords):this(Version.LUCENE_24, stopwords)
+               {
+               }
+               
+               /// <summary>Builds an analyzer with the stop words from the given file.</summary>
+               /// <seealso cref="WordlistLoader.GetWordSet(File)">
+               /// </seealso>
+               /// <param name="matchVersion">Lucene version to match. See {@link
+               /// <a href="#version">above</a>}
+               /// </param>
+               /// <param name="stopwords">File to read stop words from 
+               /// </param>
+               public StandardAnalyzer(Version matchVersion, System.IO.FileInfo stopwords)
+               {
+                       stopSet = WordlistLoader.GetWordSet(stopwords);
+                       Init(matchVersion);
+               }
+               
+               /// <summary>Builds an analyzer with the stop words from the given reader.</summary>
+               /// <seealso cref="WordlistLoader.GetWordSet(Reader)">
+               /// </seealso>
+               /// <deprecated> Use {@link #StandardAnalyzer(Version, Reader)}
+               /// instead
+               /// </deprecated>
+        [Obsolete("Use StandardAnalyzer(Version, Reader) instead")]
+               public StandardAnalyzer(System.IO.TextReader stopwords):this(Version.LUCENE_24, stopwords)
+               {
+               }
+               
+               /// <summary>Builds an analyzer with the stop words from the given reader.</summary>
+               /// <seealso cref="WordlistLoader.GetWordSet(Reader)">
+               /// </seealso>
+               /// <param name="matchVersion">Lucene version to match. See {@link
+               /// <a href="#version">above</a>}
+               /// </param>
+               /// <param name="stopwords">Reader to read stop words from 
+               /// </param>
+               public StandardAnalyzer(Version matchVersion, System.IO.TextReader stopwords)
+               {
+                       stopSet = WordlistLoader.GetWordSet(stopwords);
+                       Init(matchVersion);
+               }
+               
+               /// <summary> </summary>
+               /// <param name="replaceInvalidAcronym">Set to true if this analyzer should replace mischaracterized acronyms in the StandardTokenizer
+               /// 
+               /// See https://issues.apache.org/jira/browse/LUCENE-1068
+               /// 
+               /// </param>
+               /// <deprecated> Remove in 3.X and make true the only valid value
+               /// </deprecated>
+        [Obsolete("Remove in 3.X and make true the only valid value")]
+               public StandardAnalyzer(bool replaceInvalidAcronym):this(Version.LUCENE_24, STOP_WORDS_SET)
+               {
+                       this.replaceInvalidAcronym = replaceInvalidAcronym;
+                       useDefaultStopPositionIncrements = true;
+               }
+               
+               /// <param name="stopwords">The stopwords to use
+               /// </param>
+               /// <param name="replaceInvalidAcronym">Set to true if this analyzer should replace mischaracterized acronyms in the StandardTokenizer
+               /// 
+               /// See https://issues.apache.org/jira/browse/LUCENE-1068
+               /// 
+               /// </param>
+               /// <deprecated> Remove in 3.X and make true the only valid value
+               /// </deprecated>
+        [Obsolete("Remove in 3.X and make true the only valid value")]
+               public StandardAnalyzer(System.IO.TextReader stopwords, bool replaceInvalidAcronym):this(Version.LUCENE_24, stopwords)
+               {
+                       this.replaceInvalidAcronym = replaceInvalidAcronym;
+               }
+               
+               /// <param name="stopwords">The stopwords to use
+               /// </param>
+               /// <param name="replaceInvalidAcronym">Set to true if this analyzer should replace mischaracterized acronyms in the StandardTokenizer
+               /// 
+               /// See https://issues.apache.org/jira/browse/LUCENE-1068
+               /// 
+               /// </param>
+               /// <deprecated> Remove in 3.X and make true the only valid value
+               /// </deprecated>
+        [Obsolete("Remove in 3.X and make true the only valid value")]
+               public StandardAnalyzer(System.IO.FileInfo stopwords, bool replaceInvalidAcronym):this(Version.LUCENE_24, stopwords)
+               {
+                       this.replaceInvalidAcronym = replaceInvalidAcronym;
+               }
+               
+               /// <summary> </summary>
+               /// <param name="stopwords">The stopwords to use
+               /// </param>
+               /// <param name="replaceInvalidAcronym">Set to true if this analyzer should replace mischaracterized acronyms in the StandardTokenizer
+               /// 
+               /// See https://issues.apache.org/jira/browse/LUCENE-1068
+               /// 
+               /// </param>
+               /// <deprecated> Remove in 3.X and make true the only valid value
+               /// </deprecated>
+        [Obsolete("Remove in 3.X and make true the only valid value")]
+               public StandardAnalyzer(System.String[] stopwords, bool replaceInvalidAcronym):this(Version.LUCENE_24, StopFilter.MakeStopSet(stopwords))
+               {
+                       this.replaceInvalidAcronym = replaceInvalidAcronym;
+               }
+               
+               /// <param name="stopwords">The stopwords to use
+               /// </param>
+               /// <param name="replaceInvalidAcronym">Set to true if this analyzer should replace mischaracterized acronyms in the StandardTokenizer
+               /// 
+               /// See https://issues.apache.org/jira/browse/LUCENE-1068
+               /// 
+               /// </param>
+               /// <deprecated> Remove in 3.X and make true the only valid value
+               /// </deprecated>
+        [Obsolete("Remove in 3.X and make true the only valid value")]
+               public StandardAnalyzer(System.Collections.Hashtable stopwords, bool replaceInvalidAcronym):this(Version.LUCENE_24, stopwords)
+               {
+                       this.replaceInvalidAcronym = replaceInvalidAcronym;
+               }
+               
+               private void  Init(Version matchVersion)
+               {
+                       SetOverridesTokenStreamMethod(typeof(StandardAnalyzer));
+                       if (matchVersion.OnOrAfter(Version.LUCENE_29))
+                       {
+                               enableStopPositionIncrements = true;
+                       }
+                       else
+                       {
+                               useDefaultStopPositionIncrements = true;
+                       }
+                       if (matchVersion.OnOrAfter(Version.LUCENE_24))
+                       {
+                               replaceInvalidAcronym = defaultReplaceInvalidAcronym;
+                       }
+                       else
+                       {
+                               replaceInvalidAcronym = false;
+                       }
+               }
+               
+               /// <summary>Constructs a {@link StandardTokenizer} filtered by a {@link
+               /// StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}. 
+               /// </summary>
+               public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
+               {
+                       StandardTokenizer tokenStream = new StandardTokenizer(reader, replaceInvalidAcronym);
+                       tokenStream.SetMaxTokenLength(maxTokenLength);
+                       TokenStream result = new StandardFilter(tokenStream);
+                       result = new LowerCaseFilter(result);
+                       if (useDefaultStopPositionIncrements)
+                       {
+                               result = new StopFilter(result, stopSet);
+                       }
+                       else
+                       {
+                               result = new StopFilter(enableStopPositionIncrements, result, stopSet);
+                       }
+                       return result;
+               }
+               
+               private sealed class SavedStreams
+               {
+                       internal StandardTokenizer tokenStream;
+                       internal TokenStream filteredTokenStream;
+               }
+               
+               /// <summary>Default maximum allowed token length </summary>
+               public const int DEFAULT_MAX_TOKEN_LENGTH = 255;
+               
+               private int maxTokenLength = DEFAULT_MAX_TOKEN_LENGTH;
+               
+               /// <summary> Set maximum allowed token length.  If a token is seen
+               /// that exceeds this length then it is discarded.  This
+               /// setting only takes effect the next time tokenStream or
+               /// reusableTokenStream is called.
+               /// </summary>
+               public virtual void  SetMaxTokenLength(int length)
+               {
+                       maxTokenLength = length;
+               }
+               
+               /// <seealso cref="setMaxTokenLength">
+               /// </seealso>
+               public virtual int GetMaxTokenLength()
+               {
+                       return maxTokenLength;
+               }
+               
+               /// <deprecated> Use {@link #tokenStream} instead 
+               /// </deprecated>
+        [Obsolete("Use TokenStream instead")]
+               public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
+               {
+                       if (overridesTokenStreamMethod)
+                       {
+                               // LUCENE-1678: force fallback to tokenStream() if we
+                               // have been subclassed and that subclass overrides
+                               // tokenStream but not reusableTokenStream
+                               return TokenStream(fieldName, reader);
+                       }
+                       SavedStreams streams = (SavedStreams) GetPreviousTokenStream();
+                       if (streams == null)
+                       {
+                               streams = new SavedStreams();
+                               SetPreviousTokenStream(streams);
+                               streams.tokenStream = new StandardTokenizer(reader);
+                               streams.filteredTokenStream = new StandardFilter(streams.tokenStream);
+                               streams.filteredTokenStream = new LowerCaseFilter(streams.filteredTokenStream);
+                               if (useDefaultStopPositionIncrements)
+                               {
+                                       streams.filteredTokenStream = new StopFilter(streams.filteredTokenStream, stopSet);
+                               }
+                               else
+                               {
+                                       streams.filteredTokenStream = new StopFilter(enableStopPositionIncrements, streams.filteredTokenStream, stopSet);
+                               }
+                       }
+                       else
+                       {
+                               streams.tokenStream.Reset(reader);
+                       }
+                       streams.tokenStream.SetMaxTokenLength(maxTokenLength);
+                       
+                       streams.tokenStream.SetReplaceInvalidAcronym(replaceInvalidAcronym);
+                       
+                       return streams.filteredTokenStream;
+               }
+               
+               /// <summary> </summary>
+               /// <returns> true if this Analyzer is replacing mischaracterized acronyms in the StandardTokenizer
+               /// 
+               /// See https://issues.apache.org/jira/browse/LUCENE-1068
+               /// </returns>
+               /// <deprecated> This will be removed (hardwired to true) in 3.0
+               /// </deprecated>
+        [Obsolete("This will be removed (hardwired to true) in 3.0")]
+               public virtual bool IsReplaceInvalidAcronym()
+               {
+                       return replaceInvalidAcronym;
+               }
+               
+               /// <summary> </summary>
+               /// <param name="replaceInvalidAcronym">Set to true if this Analyzer is replacing mischaracterized acronyms in the StandardTokenizer
+               /// 
+               /// See https://issues.apache.org/jira/browse/LUCENE-1068
+               /// </param>
+               /// <deprecated> This will be removed (hardwired to true) in 3.0
+               /// </deprecated>
+        [Obsolete("This will be removed (hardwired to true) in 3.0")]
+               public virtual void  SetReplaceInvalidAcronym(bool replaceInvalidAcronym)
+               {
+                       this.replaceInvalidAcronym = replaceInvalidAcronym;
+               }
+               static StandardAnalyzer()
+               {
+                       // Default to true (fixed the bug), unless the system prop is set
+                       {
+                               System.String v = SupportClass.AppSettings.Get("Mono.Lucene.Net.Analysis.Standard.StandardAnalyzer.replaceInvalidAcronym", "true");
+                               if (v == null || v.Equals("true"))
+                                       defaultReplaceInvalidAcronym = true;
+                               else
+                                       defaultReplaceInvalidAcronym = false;
+                       }
+                       STOP_WORDS = StopAnalyzer.ENGLISH_STOP_WORDS;
+                       STOP_WORDS_SET = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
+               }
+       }
+}
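
A usage sketch for the analyzer above; matchVersion selects the compatibility behaviour listed in the class summary (stop-word position increments as of 2.9, the acronym fix as of 2.4). Field name and input are illustrative:

    using System;
    using System.IO;
    using Mono.Lucene.Net.Analysis;
    using Mono.Lucene.Net.Analysis.Standard;
    using Mono.Lucene.Net.Analysis.Tokenattributes;
    using Version = Mono.Lucene.Net.Util.Version;

    class StandardAnalyzerDemo
    {
        static void Main()
        {
            Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_29);
            TokenStream ts = analyzer.TokenStream("body",
                new StringReader("The I.B.M. fox"));
            TermAttribute term = (TermAttribute) ts.AddAttribute(typeof(TermAttribute));
            while (ts.IncrementToken())
                Console.WriteLine(term.Term());
            // Roughly: "ibm" then "fox" -- "The" is removed as a stop word,
            // StandardFilter strips the acronym's dots, LowerCaseFilter lower-cases.
        }
    }
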
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/StandardFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/StandardFilter.cs
new file mode 100644 (file)
index 0000000..b06722c
--- /dev/null
@@ -0,0 +1,90 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Token = Mono.Lucene.Net.Analysis.Token;
+using TokenFilter = Mono.Lucene.Net.Analysis.TokenFilter;
+using TokenStream = Mono.Lucene.Net.Analysis.TokenStream;
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+using TypeAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TypeAttribute;
+
+namespace Mono.Lucene.Net.Analysis.Standard
+{
+       
+       /// <summary>Normalizes tokens extracted with {@link StandardTokenizer}. </summary>
+       
+       public sealed class StandardFilter:TokenFilter
+       {
+               
+               
+               /// <summary>Construct filtering <i>in</i>. </summary>
+               public StandardFilter(TokenStream in_Renamed):base(in_Renamed)
+               {
+                       termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
+                       typeAtt = (TypeAttribute) AddAttribute(typeof(TypeAttribute));
+               }
+               
+               private static readonly System.String APOSTROPHE_TYPE;
+               private static readonly System.String ACRONYM_TYPE;
+               
+               // this filter uses the term and type attributes
+               private TypeAttribute typeAtt;
+               private TermAttribute termAtt;
+               
+               /// <summary>Returns the next token in the stream, or null at EOS.
+               /// <p/>Removes <tt>'s</tt> from the end of words.
+               /// <p/>Removes dots from acronyms.
+               /// </summary>
+               public override bool IncrementToken()
+               {
+                       if (!input.IncrementToken())
+                       {
+                               return false;
+                       }
+                       
+                       char[] buffer = termAtt.TermBuffer();
+                       int bufferLength = termAtt.TermLength();
+                       System.String type = typeAtt.Type();
+                       
+                       if ((System.Object) type == (System.Object) APOSTROPHE_TYPE && bufferLength >= 2 && buffer[bufferLength - 2] == '\'' && (buffer[bufferLength - 1] == 's' || buffer[bufferLength - 1] == 'S'))
+                       {
+                               // Strip last 2 characters off
+                               termAtt.SetTermLength(bufferLength - 2);
+                       }
+                       else if ((System.Object) type == (System.Object) ACRONYM_TYPE)
+                       {
+                               // remove dots
+                               int upto = 0;
+                               for (int i = 0; i < bufferLength; i++)
+                               {
+                                       char c = buffer[i];
+                                       if (c != '.')
+                                               buffer[upto++] = c;
+                               }
+                               termAtt.SetTermLength(upto);
+                       }
+                       
+                       return true;
+               }
+               static StandardFilter()
+               {
+                       APOSTROPHE_TYPE = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.APOSTROPHE];
+                       ACRONYM_TYPE = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.ACRONYM];
+               }
+       }
+}
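
A sketch of the two normalizations the filter above performs, wrapping a StandardTokenizer directly (no LowerCaseFilter, so case is preserved); the input text is illustrative:

    using System;
    using System.IO;
    using Mono.Lucene.Net.Analysis;
    using Mono.Lucene.Net.Analysis.Standard;
    using Mono.Lucene.Net.Analysis.Tokenattributes;
    using Version = Mono.Lucene.Net.Util.Version;

    class StandardFilterDemo
    {
        static void Main()
        {
            TokenStream ts = new StandardFilter(new StandardTokenizer(
                Version.LUCENE_24, new StringReader("Bob's I.B.M.")));
            TermAttribute term = (TermAttribute) ts.AddAttribute(typeof(TermAttribute));
            while (ts.IncrementToken())
                Console.WriteLine(term.Term());
            // Roughly: "Bob" (trailing 's stripped from the <APOSTROPHE> token)
            // and "IBM" (dots removed from the <ACRONYM> token).
        }
    }
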
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/StandardTokenizer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/StandardTokenizer.cs
new file mode 100644 (file)
index 0000000..f7caaf5
--- /dev/null
@@ -0,0 +1,351 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using CharReader = Mono.Lucene.Net.Analysis.CharReader;
+using Token = Mono.Lucene.Net.Analysis.Token;
+using Tokenizer = Mono.Lucene.Net.Analysis.Tokenizer;
+using OffsetAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.OffsetAttribute;
+using PositionIncrementAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.PositionIncrementAttribute;
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+using TypeAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TypeAttribute;
+using AttributeSource = Mono.Lucene.Net.Util.AttributeSource;
+using Version = Mono.Lucene.Net.Util.Version;
+
+namespace Mono.Lucene.Net.Analysis.Standard
+{
+       
+       /// <summary>A grammar-based tokenizer constructed with JFlex
+       /// 
+       /// <p/> This should be a good tokenizer for most European-language documents:
+       /// 
+       /// <ul>
+       /// <li>Splits words at punctuation characters, removing punctuation. However, a 
+       /// dot that's not followed by whitespace is considered part of a token.</li>
+       /// <li>Splits words at hyphens, unless there's a number in the token, in which case
+       /// the whole token is interpreted as a product number and is not split.</li>
+       /// <li>Recognizes email addresses and internet hostnames as one token.</li>
+       /// </ul>
+       /// 
+       /// <p/>Many applications have specific tokenizer needs.  If this tokenizer does
+       /// not suit your application, please consider copying this source code
+       /// directory to your project and maintaining your own grammar-based tokenizer.
+       /// 
+       /// <a name="version"/>
+       /// <p/>
+       /// You must specify the required {@link Version} compatibility when creating
+       /// StandardAnalyzer:
+       /// <ul>
+       /// <li>As of 2.4, Tokens incorrectly identified as acronyms are corrected (see
+       /// <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a></li>
+       /// </ul>
+       /// </summary>
+       
+       public class StandardTokenizer:Tokenizer
+       {
+               private void  InitBlock()
+               {
+                       maxTokenLength = StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH;
+               }
+               /// <summary>A private instance of the JFlex-constructed scanner </summary>
+               private StandardTokenizerImpl scanner;
+               
+               public const int ALPHANUM = 0;
+               public const int APOSTROPHE = 1;
+               public const int ACRONYM = 2;
+               public const int COMPANY = 3;
+               public const int EMAIL = 4;
+               public const int HOST = 5;
+               public const int NUM = 6;
+               public const int CJ = 7;
+               
+               /// <deprecated> this solves a bug where HOSTs that end with '.' are identified
+               /// as ACRONYMs. It is deprecated and will be removed in the next
+               /// release.
+               /// </deprecated>
+        [Obsolete("this solves a bug where HOSTs that end with '.' are identified as ACRONYMs. It is deprecated and will be removed in the next release.")]
+               public const int ACRONYM_DEP = 8;
+               
+               /// <summary>String token types that correspond to token type int constants </summary>
+               public static readonly System.String[] TOKEN_TYPES = new System.String[]{"<ALPHANUM>", "<APOSTROPHE>", "<ACRONYM>", "<COMPANY>", "<EMAIL>", "<HOST>", "<NUM>", "<CJ>", "<ACRONYM_DEP>"};
+               
+               /// <deprecated> Please use {@link #TOKEN_TYPES} instead 
+               /// </deprecated>
+        [Obsolete("Please use TOKEN_TYPES instead")]
+               public static readonly System.String[] tokenImage = TOKEN_TYPES;
+               
+               /// <summary> Specifies whether deprecated acronyms should be replaced with HOST type.
+               /// This is false by default to support backward compatibility.
+               /// <p/>
+               /// See http://issues.apache.org/jira/browse/LUCENE-1068
+               /// 
+               /// </summary>
+               /// <deprecated> this should be removed in the next release (3.0).
+               /// </deprecated>
+        [Obsolete("this should be removed in the next release (3.0).")]
+               private bool replaceInvalidAcronym;
+               
+               private int maxTokenLength;
+               
+               /// <summary>Set the max allowed token length.  Any token longer
+               /// than this is skipped. 
+               /// </summary>
+               public virtual void  SetMaxTokenLength(int length)
+               {
+                       this.maxTokenLength = length;
+               }
+               
+               /// <seealso cref="setMaxTokenLength">
+               /// </seealso>
+               public virtual int GetMaxTokenLength()
+               {
+                       return maxTokenLength;
+               }
+               
+               /// <summary> Creates a new instance of the {@link StandardTokenizer}. Attaches the
+               /// <code>input</code> to a newly created JFlex scanner.
+               /// </summary>
+               /// <deprecated> Use {@link #StandardTokenizer(Version, Reader)} instead
+               /// </deprecated>
+        [Obsolete("Use StandardTokenizer(Version, Reader) instead")]
+               public StandardTokenizer(System.IO.TextReader input):this(Version.LUCENE_24, input)
+               {
+               }
+               
+               /// <summary> Creates a new instance of the {@link Mono.Lucene.Net.Analysis.Standard.StandardTokenizer}.  Attaches
+               /// the <code>input</code> to the newly created JFlex scanner.
+               /// 
+               /// </summary>
+               /// <param name="input">The input reader
+               /// </param>
+               /// <param name="replaceInvalidAcronym">Set to true to replace mischaracterized acronyms with HOST.
+               /// 
+               /// See http://issues.apache.org/jira/browse/LUCENE-1068
+               /// </param>
+               /// <deprecated> Use {@link #StandardTokenizer(Version, Reader)} instead
+               /// </deprecated>
+        [Obsolete("Use StandardTokenizer(Version, Reader) instead")]
+               public StandardTokenizer(System.IO.TextReader input, bool replaceInvalidAcronym):base()
+               {
+                       InitBlock();
+                       this.scanner = new StandardTokenizerImpl(input);
+                       Init(input, replaceInvalidAcronym);
+               }
+               
+               /// <summary> Creates a new instance of the
+               /// {@link org.apache.lucene.analysis.standard.StandardTokenizer}. Attaches
+               /// the <code>input</code> to the newly created JFlex scanner.
+               /// 
+               /// </summary>
+               /// <param name="input">The input reader
+               /// 
+               /// See http://issues.apache.org/jira/browse/LUCENE-1068
+               /// </param>
+               public StandardTokenizer(Version matchVersion, System.IO.TextReader input):base()
+               {
+                       InitBlock();
+                       this.scanner = new StandardTokenizerImpl(input);
+                       Init(input, matchVersion);
+               }
+               
+               /// <summary> Creates a new StandardTokenizer with a given {@link AttributeSource}. </summary>
+               /// <deprecated> Use
+               /// {@link #StandardTokenizer(Version, AttributeSource, Reader)}
+               /// instead
+               /// </deprecated>
+        [Obsolete("Use StandardTokenizer(Version, AttributeSource, Reader) instead")]
+               public StandardTokenizer(AttributeSource source, System.IO.TextReader input, bool replaceInvalidAcronym):base(source)
+               {
+                       InitBlock();
+                       this.scanner = new StandardTokenizerImpl(input);
+                       Init(input, replaceInvalidAcronym);
+               }
+               
+               /// <summary> Creates a new StandardTokenizer with a given {@link AttributeSource}.</summary>
+               public StandardTokenizer(Version matchVersion, AttributeSource source, System.IO.TextReader input):base(source)
+               {
+                       InitBlock();
+                       this.scanner = new StandardTokenizerImpl(input);
+                       Init(input, matchVersion);
+               }
+               
+               /// <summary> Creates a new StandardTokenizer with a given {@link Mono.Lucene.Net.Util.AttributeSource.AttributeFactory} </summary>
+               /// <deprecated> Use
+               /// {@link #StandardTokenizer(Version, org.apache.lucene.util.AttributeSource.AttributeFactory, Reader)}
+               /// instead
+               /// </deprecated>
+        [Obsolete("Use StandardTokenizer(Version, Mono.Lucene.Net.Util.AttributeSource.AttributeFactory, Reader) instead")]
+               public StandardTokenizer(AttributeFactory factory, System.IO.TextReader input, bool replaceInvalidAcronym):base(factory)
+               {
+                       InitBlock();
+                       this.scanner = new StandardTokenizerImpl(input);
+                       Init(input, replaceInvalidAcronym);
+               }
+               
+               /// <summary> Creates a new StandardTokenizer with a given
+               /// {@link org.apache.lucene.util.AttributeSource.AttributeFactory}
+               /// </summary>
+               public StandardTokenizer(Version matchVersion, AttributeFactory factory, System.IO.TextReader input):base(factory)
+               {
+                       InitBlock();
+                       this.scanner = new StandardTokenizerImpl(input);
+                       Init(input, matchVersion);
+               }
+               
+               private void  Init(System.IO.TextReader input, bool replaceInvalidAcronym)
+               {
+                       this.replaceInvalidAcronym = replaceInvalidAcronym;
+                       this.input = input;
+                       termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
+                       offsetAtt = (OffsetAttribute) AddAttribute(typeof(OffsetAttribute));
+                       posIncrAtt = (PositionIncrementAttribute) AddAttribute(typeof(PositionIncrementAttribute));
+                       typeAtt = (TypeAttribute) AddAttribute(typeof(TypeAttribute));
+               }
+               
+               private void  Init(System.IO.TextReader input, Version matchVersion)
+               {
+                       if (matchVersion.OnOrAfter(Version.LUCENE_24))
+                       {
+                               Init(input, true);
+                       }
+                       else
+                       {
+                               Init(input, false);
+                       }
+               }
+               
+               // this tokenizer generates three attributes:
+               // offset, positionIncrement and type
+               private TermAttribute termAtt;
+               private OffsetAttribute offsetAtt;
+               private PositionIncrementAttribute posIncrAtt;
+               private TypeAttribute typeAtt;
+               
+               /*
+               * (non-Javadoc)
+               *
+               * @see Mono.Lucene.Net.Analysis.TokenStream#next()
+               */
+               public override bool IncrementToken()
+               {
+                       ClearAttributes();
+                       int posIncr = 1;
+                       
+                       while (true)
+                       {
+                               int tokenType = scanner.GetNextToken();
+                               
+                               if (tokenType == StandardTokenizerImpl.YYEOF)
+                               {
+                                       return false;
+                               }
+                               
+                               if (scanner.Yylength() <= maxTokenLength)
+                               {
+                                       posIncrAtt.SetPositionIncrement(posIncr);
+                                       scanner.GetText(termAtt);
+                                       int start = scanner.Yychar();
+                                       offsetAtt.SetOffset(CorrectOffset(start), CorrectOffset(start + termAtt.TermLength()));
+                                       // This 'if' should be removed in the next release. For now, it converts
+                                       // invalid acronyms to HOST. When removed, only the 'else' part should
+                                       // remain.
+                                       if (tokenType == StandardTokenizerImpl.ACRONYM_DEP)
+                                       {
+                                               if (replaceInvalidAcronym)
+                                               {
+                                                       typeAtt.SetType(StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.HOST]);
+                                                       termAtt.SetTermLength(termAtt.TermLength() - 1); // remove extra '.'
+                                               }
+                                               else
+                                               {
+                                                       typeAtt.SetType(StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.ACRONYM]);
+                                               }
+                                       }
+                                       else
+                                       {
+                                               typeAtt.SetType(StandardTokenizerImpl.TOKEN_TYPES[tokenType]);
+                                       }
+                                       return true;
+                               }
+                               // When we skip a too-long term, we still increment the
+                               // position increment
+                               else
+                                       posIncr++;
+                       }
+               }
+               
+               public override void  End()
+               {
+                       // set final offset
+                       int finalOffset = CorrectOffset(scanner.Yychar() + scanner.Yylength());
+                       offsetAtt.SetOffset(finalOffset, finalOffset);
+               }
+               
+               /// <deprecated> Will be removed in Lucene 3.0. This method is final, as it should
+               /// not be overridden. Delegates to the backwards compatibility layer. 
+               /// </deprecated>
+        [Obsolete("Will be removed in Lucene 3.0. This method is final, as it should not be overridden. Delegates to the backwards compatibility layer. ")]
+               public override Token Next(Token reusableToken)
+               {
+                       return base.Next(reusableToken);
+               }
+               
+               /// <deprecated> Will be removed in Lucene 3.0. This method is final, as it should
+               /// not be overridden. Delegates to the backwards compatibility layer. 
+               /// </deprecated>
+        [Obsolete("Will be removed in Lucene 3.0. This method is final, as it should not be overridden. Delegates to the backwards compatibility layer. ")]
+               public override Token Next()
+               {
+                       return base.Next();
+               }
+               
+                               
+               public override void  Reset(System.IO.TextReader reader)
+               {
+                       base.Reset(reader);
+                       scanner.Reset(reader);
+               }
+               
+               /// <summary> Prior to https://issues.apache.org/jira/browse/LUCENE-1068, StandardTokenizer mischaracterized tokens like www.abc.com as acronyms
+               /// when they should have been labeled as hosts instead.
+               /// </summary>
+               /// <returns> true if StandardTokenizer now returns these tokens as Hosts, otherwise false
+               /// 
+               /// </returns>
+               /// <deprecated> Remove in 3.X and make true the only valid value
+               /// </deprecated>
+        [Obsolete("Remove in 3.X and make true the only valid value")]
+               public virtual bool IsReplaceInvalidAcronym()
+               {
+                       return replaceInvalidAcronym;
+               }
+               
+               /// <summary> </summary>
+               /// <param name="replaceInvalidAcronym">Set to true to replace mischaracterized acronyms as HOST.
+               /// </param>
+               /// <deprecated> Remove in 3.X and make true the only valid value
+               /// 
+               /// See https://issues.apache.org/jira/browse/LUCENE-1068
+               /// </deprecated>
+        [Obsolete("Remove in 3.X and make true the only valid value. See https://issues.apache.org/jira/browse/LUCENE-1068")]
+               public virtual void  SetReplaceInvalidAcronym(bool replaceInvalidAcronym)
+               {
+                       this.replaceInvalidAcronym = replaceInvalidAcronym;
+               }
+       }
+}
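
A sketch exercising the tokenization rules from the class summary above; the type strings come from TOKEN_TYPES, and the classifications shown are indicative rather than guaranteed:

    using System;
    using System.IO;
    using Mono.Lucene.Net.Analysis;
    using Mono.Lucene.Net.Analysis.Standard;
    using Mono.Lucene.Net.Analysis.Tokenattributes;
    using Version = Mono.Lucene.Net.Util.Version;

    class StandardTokenizerDemo
    {
        static void Main()
        {
            Tokenizer t = new StandardTokenizer(Version.LUCENE_24,
                new StringReader("sales@example.com bought XY-7 from example.com"));
            TermAttribute term = (TermAttribute) t.AddAttribute(typeof(TermAttribute));
            TypeAttribute type = (TypeAttribute) t.AddAttribute(typeof(TypeAttribute));
            while (t.IncrementToken())
                Console.WriteLine(term.Term() + "\t" + type.Type());
            // Indicative output:
            //   sales@example.com   <EMAIL>
            //   bought              <ALPHANUM>
            //   XY-7                <NUM>    (hyphen kept: the token contains a digit)
            //   from                <ALPHANUM>
            //   example.com         <HOST>
        }
    }
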
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/StandardTokenizerImpl.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/StandardTokenizerImpl.cs
new file mode 100644 (file)
index 0000000..55ddc80
--- /dev/null
@@ -0,0 +1,709 @@
+/* The following code was generated by JFlex 1.4.1 on 9/4/08 6:49 PM */
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/*
+
+NOTE: if you change this file and need to regenerate the tokenizer,
+remember to use JRE 1.4 when running jflex (before Lucene 3.0).
+This grammar now uses constructs (eg :digit:) whose meaning can
+vary according to the JRE used to run jflex.  See
+https://issues.apache.org/jira/browse/LUCENE-1126 for details
+*/
+
+using System;
+
+using Token = Mono.Lucene.Net.Analysis.Token;
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+
+namespace Mono.Lucene.Net.Analysis.Standard
+{
+       
+       
+       /// <summary> This class is a scanner generated by 
+       /// <a href="http://www.jflex.de/">JFlex</a> 1.4.1
+       /// on 9/4/08 6:49 PM from the specification file
+       /// <tt>/tango/mike/src/lucene.standarddigit/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex</tt>
+       /// </summary>
+       class StandardTokenizerImpl
+       {
+               
+               /// <summary>This character denotes the end of file </summary>
+               public const int YYEOF = - 1;
+               
+               /// <summary>initial size of the lookahead buffer </summary>
+               private const int ZZ_BUFFERSIZE = 16384;
+               
+               /// <summary>lexical states </summary>
+               public const int YYINITIAL = 0;
+               
+               /// <summary> Translates characters to character classes</summary>
+               private const System.String ZZ_CMAP_PACKED = "\x0009\x0000\x0001\x0000\x0001\x000D\x0001\x0000\x0001\x0000\x0001\x000C\x0012\x0000\x0001\x0000\x0005\x0000\x0001\x0005" + "\x0001\x0003\x0004\x0000\x0001\x0009\x0001\x0007\x0001\x0004\x0001\x0009\x000A\x0002\x0006\x0000\x0001\x0006\x001A\x000A" + "\x0004\x0000\x0001\x0008\x0001\x0000\x001A\x000A\x002F\x0000\x0001\x000A\x000A\x0000\x0001\x000A\x0004\x0000\x0001\x000A" + "\x0005\x0000\x0017\x000A\x0001\x0000\x001F\x000A\x0001\x0000\u0128\x000A\x0002\x0000\x0012\x000A\x001C\x0000\x005E\x000A" + "\x0002\x0000\x0009\x000A\x0002\x0000\x0007\x000A\x000E\x0000\x0002\x000A\x000E\x0000\x0005\x000A\x0009\x0000\x0001\x000A" + "\x008B\x0000\x0001\x000A\x000B\x0000\x0001\x000A\x0001\x0000\x0003\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0014\x000A" + "\x0001\x0000\x002C\x000A\x0001\x0000\x0008\x000A\x0002\x0000\x001A\x000A\x000C\x0000\x0082\x000A\x000A\x0000\x0039\x000A" + "\x0002\x0000\x0002\x000A\x0002\x0000\x0002\x000A\x0003\x0000\x0026\x000A\x0002\x0000\x0002\x000A\x0037\x0000\x0026\x000A" + "\x0002\x0000\x0001\x000A\x0007\x0000\x0027\x000A\x0048\x0000\x001B\x000A\x0005\x0000\x0003\x000A\x002E\x0000\x001A\x000A" + "\x0005\x0000\x000B\x000A\x0015\x0000\x000A\x0002\x0007\x0000\x0063\x000A\x0001\x0000\x0001\x000A\x000F\x0000\x0002\x000A" + "\x0009\x0000\x000A\x0002\x0003\x000A\x0013\x0000\x0001\x000A\x0001\x0000\x001B\x000A\x0053\x0000\x0026\x000A\u015f\x0000" + "\x0035\x000A\x0003\x0000\x0001\x000A\x0012\x0000\x0001\x000A\x0007\x0000\x000A\x000A\x0004\x0000\x000A\x0002\x0015\x0000" + "\x0008\x000A\x0002\x0000\x0002\x000A\x0002\x0000\x0016\x000A\x0001\x0000\x0007\x000A\x0001\x0000\x0001\x000A\x0003\x0000" + "\x0004\x000A\x0022\x0000\x0002\x000A\x0001\x0000\x0003\x000A\x0004\x0000\x000A\x0002\x0002\x000A\x0013\x0000\x0006\x000A" + "\x0004\x0000\x0002\x000A\x0002\x0000\x0016\x000A\x0001\x0000\x0007\x000A\x0001\x0000\x0002\x000A\x0001\x0000\x0002\x000A" + 
+                       "\x0001\x0000\x0002\x000A\x001F\x0000\x0004\x000A\x0001\x0000\x0001\x000A\x0007\x0000\x000A\x0002\x0002\x0000\x0003\x000A" + "\x0010\x0000\x0007\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0003\x000A\x0001\x0000\x0016\x000A\x0001\x0000\x0007\x000A" + "\x0001\x0000\x0002\x000A\x0001\x0000\x0005\x000A\x0003\x0000\x0001\x000A\x0012\x0000\x0001\x000A\x000F\x0000\x0001\x000A" + "\x0005\x0000\x000A\x0002\x0015\x0000\x0008\x000A\x0002\x0000\x0002\x000A\x0002\x0000\x0016\x000A\x0001\x0000\x0007\x000A" + "\x0001\x0000\x0002\x000A\x0002\x0000\x0004\x000A\x0003\x0000\x0001\x000A\x001E\x0000\x0002\x000A\x0001\x0000\x0003\x000A" + "\x0004\x0000\x000A\x0002\x0015\x0000\x0006\x000A\x0003\x0000\x0003\x000A\x0001\x0000\x0004\x000A\x0003\x0000\x0002\x000A" + "\x0001\x0000\x0001\x000A\x0001\x0000\x0002\x000A\x0003\x0000\x0002\x000A\x0003\x0000\x0003\x000A\x0003\x0000\x0008\x000A" + "\x0001\x0000\x0003\x000A\x002D\x0000\x0009\x0002\x0015\x0000\x0008\x000A\x0001\x0000\x0003\x000A\x0001\x0000\x0017\x000A" + "\x0001\x0000\x000A\x000A\x0001\x0000\x0005\x000A\x0026\x0000\x0002\x000A\x0004\x0000\x000A\x0002\x0015\x0000\x0008\x000A" + "\x0001\x0000\x0003\x000A\x0001\x0000\x0017\x000A\x0001\x0000\x000A\x000A\x0001\x0000\x0005\x000A\x0024\x0000\x0001\x000A" + "\x0001\x0000\x0002\x000A\x0004\x0000\x000A\x0002\x0015\x0000\x0008\x000A\x0001\x0000\x0003\x000A\x0001\x0000\x0017\x000A" + "\x0001\x0000\x0010\x000A\x0026\x0000\x0002\x000A\x0004\x0000\x000A\x0002\x0015\x0000\x0012\x000A\x0003\x0000\x0018\x000A" + "\x0001\x0000\x0009\x000A\x0001\x0000\x0001\x000A\x0002\x0000\x0007\x000A\x0039\x0000\x0001\x0001\x0030\x000A\x0001\x0001" + "\x0002\x000A\x000C\x0001\x0007\x000A\x0009\x0001\x000A\x0002\x0027\x0000\x0002\x000A\x0001\x0000\x0001\x000A\x0002\x0000" + "\x0002\x000A\x0001\x0000\x0001\x000A\x0002\x0000\x0001\x000A\x0006\x0000\x0004\x000A\x0001\x0000\x0007\x000A\x0001\x0000" + "\x0003\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0001\x000A\x0002\x0000\x0002\x000A\x0001\x0000\x0004\x000A\x0001\x0000" + 
+                       "\x0002\x000A\x0009\x0000\x0001\x000A\x0002\x0000\x0005\x000A\x0001\x0000\x0001\x000A\x0009\x0000\x000A\x0002\x0002\x0000" + "\x0002\x000A\x0022\x0000\x0001\x000A\x001F\x0000\x000A\x0002\x0016\x0000\x0008\x000A\x0001\x0000\x0022\x000A\x001D\x0000" + "\x0004\x000A\x0074\x0000\x0022\x000A\x0001\x0000\x0005\x000A\x0001\x0000\x0002\x000A\x0015\x0000\x000A\x0002\x0006\x0000" + "\x0006\x000A\x004A\x0000\x0026\x000A\x000A\x0000\x0027\x000A\x0009\x0000\x005A\x000A\x0005\x0000\x0044\x000A\x0005\x0000" + "\x0052\x000A\x0006\x0000\x0007\x000A\x0001\x0000\x003F\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0004\x000A\x0002\x0000" + "\x0007\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0004\x000A\x0002\x0000\x0027\x000A\x0001\x0000\x0001\x000A\x0001\x0000" + "\x0004\x000A\x0002\x0000\x001F\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0004\x000A\x0002\x0000\x0007\x000A\x0001\x0000" + "\x0001\x000A\x0001\x0000\x0004\x000A\x0002\x0000\x0007\x000A\x0001\x0000\x0007\x000A\x0001\x0000\x0017\x000A\x0001\x0000" + "\x001F\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0004\x000A\x0002\x0000\x0007\x000A\x0001\x0000\x0027\x000A\x0001\x0000" + "\x0013\x000A\x000E\x0000\x0009\x0002\x002E\x0000\x0055\x000A\x000C\x0000\u026c\x000A\x0002\x0000\x0008\x000A\x000A\x0000" + "\x001A\x000A\x0005\x0000\x004B\x000A\x0095\x0000\x0034\x000A\x002C\x0000\x000A\x0002\x0026\x0000\x000A\x0002\x0006\x0000" + "\x0058\x000A\x0008\x0000\x0029\x000A\u0557\x0000\x009C\x000A\x0004\x0000\x005A\x000A\x0006\x0000\x0016\x000A\x0002\x0000" + "\x0006\x000A\x0002\x0000\x0026\x000A\x0002\x0000\x0006\x000A\x0002\x0000\x0008\x000A\x0001\x0000\x0001\x000A\x0001\x0000" + "\x0001\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x001F\x000A\x0002\x0000\x0035\x000A\x0001\x0000\x0007\x000A\x0001\x0000" + "\x0001\x000A\x0003\x0000\x0003\x000A\x0001\x0000\x0007\x000A\x0003\x0000\x0004\x000A\x0002\x0000\x0006\x000A\x0004\x0000" + "\x000D\x000A\x0005\x0000\x0003\x000A\x0001\x0000\x0007\x000A\x0082\x0000\x0001\x000A\x0082\x0000\x0001\x000A\x0004\x0000" + 
+                       "\x0001\x000A\x0002\x0000\x000A\x000A\x0001\x0000\x0001\x000A\x0003\x0000\x0005\x000A\x0006\x0000\x0001\x000A\x0001\x0000" + "\x0001\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0004\x000A\x0001\x0000\x0003\x000A\x0001\x0000\x0007\x000A\u0ecb\x0000" + "\x0002\x000A\x002A\x0000\x0005\x000A\x000A\x0000\x0001\x000B\x0054\x000B\x0008\x000B\x0002\x000B\x0002\x000B\x005A\x000B" + "\x0001\x000B\x0003\x000B\x0006\x000B\x0028\x000B\x0003\x000B\x0001\x0000\x005E\x000A\x0011\x0000\x0018\x000A\x0038\x0000" + "\x0010\x000B\u0100\x0000\x0080\x000B\x0080\x0000\u19b6\x000B\x000A\x000B\x0040\x0000\u51a6\x000B\x005A\x000B\u048d\x000A" + "\u0773\x0000\u2ba4\x000A\u215c\x0000\u012e\x000B\x00D2\x000B\x0007\x000A\x000C\x0000\x0005\x000A\x0005\x0000\x0001\x000A" + "\x0001\x0000\x000A\x000A\x0001\x0000\x000D\x000A\x0001\x0000\x0005\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0002\x000A" + "\x0001\x0000\x0002\x000A\x0001\x0000\x006C\x000A\x0021\x0000\u016b\x000A\x0012\x0000\x0040\x000A\x0002\x0000\x0036\x000A" + "\x0028\x0000\x000C\x000A\x0074\x0000\x0003\x000A\x0001\x0000\x0001\x000A\x0001\x0000\x0087\x000A\x0013\x0000\x000A\x0002" + "\x0007\x0000\x001A\x000A\x0006\x0000\x001A\x000A\x000A\x0000\x0001\x000B\x003A\x000B\x001F\x000A\x0003\x0000\x0006\x000A" + "\x0002\x0000\x0006\x000A\x0002\x0000\x0006\x000A\x0002\x0000\x0003\x000A\x0023\x0000";
+               
+               /// <summary> Translates characters to character classes</summary>
+               private static readonly char[] ZZ_CMAP = ZzUnpackCMap(ZZ_CMAP_PACKED);
+               
+               /// <summary> Translates DFA states to action switch labels.</summary>
+               private static readonly int[] ZZ_ACTION = ZzUnpackAction();
+               
+               private const System.String ZZ_ACTION_PACKED_0 = "\x0001\x0000\x0001\x0001\x0003\x0002\x0001\x0003\x0001\x0001\x000B\x0000\x0001\x0002\x0003\x0004" + "\x0002\x0000\x0001\x0005\x0001\x0000\x0001\x0005\x0003\x0004\x0006\x0005\x0001\x0006\x0001\x0004" + "\x0002\x0007\x0001\x0008\x0001\x0000\x0001\x0008\x0003\x0000\x0002\x0008\x0001\x0009\x0001\x000A" + "\x0001\x0004";
+               
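+               // The ZZ_*_PACKED_0 strings are a simple run-length encoding: the
+               // unpackers consume (count, value) character pairs, and each pair
+               // expands to count copies of value (counts are always >= 1, hence
+               // the do/while loops). Illustrative example: the pair
+               // "\x0003\x0002" expands to { 2, 2, 2 }.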
+               private static int[] ZzUnpackAction()
+               {
+                       int[] result = new int[51];
+                       int offset = 0;
+                       offset = ZzUnpackAction(ZZ_ACTION_PACKED_0, offset, result);
+                       return result;
+               }
+               
+               private static int ZzUnpackAction(System.String packed, int offset, int[] result)
+               {
+                       int i = 0; /* index in packed string  */
+                       int j = offset; /* index in unpacked array */
+                       int l = packed.Length;
+                       while (i < l)
+                       {
+                               int count = packed[i++];
+                               int value_Renamed = packed[i++];
+                               do 
+                                       result[j++] = value_Renamed;
+                               while (--count > 0);
+                       }
+                       return j;
+               }
+               
+               
+               /// <summary> Translates a state to a row index in the transition table</summary>
+               private static readonly int[] ZZ_ROWMAP = ZzUnpackRowMap();
+               
+               private const System.String ZZ_ROWMAP_PACKED_0 = "\x0000\x0000\x0000\x000E\x0000\x001C\x0000\x002A\x0000\x0038\x0000\x000E\x0000\x0046\x0000\x0054" + "\x0000\x0062\x0000\x0070\x0000\x007E\x0000\x008C\x0000\x009A\x0000\x00A8\x0000\x00B6\x0000\x00C4" + "\x0000\x00D2\x0000\x00E0\x0000\x00EE\x0000\x00FC\x0000\u010a\x0000\u0118\x0000\u0126\x0000\u0134" + "\x0000\u0142\x0000\u0150\x0000\u015e\x0000\u016c\x0000\u017a\x0000\u0188\x0000\u0196\x0000\u01a4" + "\x0000\u01b2\x0000\u01c0\x0000\u01ce\x0000\u01dc\x0000\u01ea\x0000\u01f8\x0000\x00D2\x0000\u0206" + "\x0000\u0214\x0000\u0222\x0000\u0230\x0000\u023e\x0000\u024c\x0000\u025a\x0000\x0054\x0000\x008C" + "\x0000\u0268\x0000\u0276\x0000\u0284";
+               
+               private static int[] ZzUnpackRowMap()
+               {
+                       int[] result = new int[51];
+                       int offset = 0;
+                       offset = ZzUnpackRowMap(ZZ_ROWMAP_PACKED_0, offset, result);
+                       return result;
+               }
+               
+               private static int ZzUnpackRowMap(System.String packed, int offset, int[] result)
+               {
+                       int i = 0; /* index in packed string  */
+                       int j = offset; /* index in unpacked array */
+                       int l = packed.Length;
+                       while (i < l)
+                       {
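+                               // Each row offset is a 32-bit value stored as two
+                               // 16-bit chars: high half first, then low half.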
+                               int high = packed[i++] << 16;
+                               result[j++] = high | packed[i++];
+                       }
+                       return j;
+               }
+               
+               /// <summary> The transition table of the DFA</summary>
+               private static readonly int[] ZZ_TRANS = ZzUnpackTrans();
+               
+               private const System.String ZZ_TRANS_PACKED_0 = "\x0001\x0002\x0001\x0003\x0001\x0004\x0007\x0002\x0001\x0005\x0001\x0006\x0001\x0007\x0001\x0002" + "\x000F\x0000\x0002\x0003\x0001\x0000\x0001\x0008\x0001\x0000\x0001\x0009\x0002\x000A\x0001\x000B" + "\x0001\x0003\x0004\x0000\x0001\x0003\x0001\x0004\x0001\x0000\x0001\x000C\x0001\x0000\x0001\x0009" + "\x0002\x000D\x0001\x000E\x0001\x0004\x0004\x0000\x0001\x0003\x0001\x0004\x0001\x000F\x0001\x0010" + "\x0001\x0011\x0001\x0012\x0002\x000A\x0001\x000B\x0001\x0013\x0010\x0000\x0001\x0002\x0001\x0000" + "\x0001\x0014\x0001\x0015\x0007\x0000\x0001\x0016\x0004\x0000\x0002\x0017\x0007\x0000\x0001\x0017" + "\x0004\x0000\x0001\x0018\x0001\x0019\x0007\x0000\x0001\x001A\x0005\x0000\x0001\x001B\x0007\x0000" + "\x0001\x000B\x0004\x0000\x0001\x001C\x0001\x001D\x0007\x0000\x0001\x001E\x0004\x0000\x0001\x001F" + "\x0001\x0020\x0007\x0000\x0001\x0021\x0004\x0000\x0001\x0022\x0001\x0023\x0007\x0000\x0001\x0024" + "\x000D\x0000\x0001\x0025\x0004\x0000\x0001\x0014\x0001\x0015\x0007\x0000\x0001\x0026\x000D\x0000" + "\x0001\x0027\x0004\x0000\x0002\x0017\x0007\x0000\x0001\x0028\x0004\x0000\x0001\x0003\x0001\x0004" + "\x0001\x000F\x0001\x0008\x0001\x0011\x0001\x0012\x0002\x000A\x0001\x000B\x0001\x0013\x0004\x0000" + "\x0002\x0014\x0001\x0000\x0001\x0029\x0001\x0000\x0001\x0009\x0002\x002A\x0001\x0000\x0001\x0014" + "\x0004\x0000\x0001\x0014\x0001\x0015\x0001\x0000\x0001\x002B\x0001\x0000\x0001\x0009\x0002\x002C" + "\x0001\x002D\x0001\x0015\x0004\x0000\x0001\x0014\x0001\x0015\x0001\x0000\x0001\x0029\x0001\x0000" + "\x0001\x0009\x0002\x002A\x0001\x0000\x0001\x0016\x0004\x0000\x0002\x0017\x0001\x0000\x0001\x002E" + "\x0002\x0000\x0001\x002E\x0002\x0000\x0001\x0017\x0004\x0000\x0002\x0018\x0001\x0000\x0001\x002A" + "\x0001\x0000\x0001\x0009\x0002\x002A\x0001\x0000\x0001\x0018\x0004\x0000\x0001\x0018\x0001\x0019" + "\x0001\x0000\x0001\x002C\x0001\x0000\x0001\x0009\x0002\x002C\x0001\x002D\x0001\x0019\x0004\x0000" + 
+                       "\x0001\x0018\x0001\x0019\x0001\x0000\x0001\x002A\x0001\x0000\x0001\x0009\x0002\x002A\x0001\x0000" + "\x0001\x001A\x0005\x0000\x0001\x001B\x0001\x0000\x0001\x002D\x0002\x0000\x0003\x002D\x0001\x001B" + "\x0004\x0000\x0002\x001C\x0001\x0000\x0001\x002F\x0001\x0000\x0001\x0009\x0002\x000A\x0001\x000B" + "\x0001\x001C\x0004\x0000\x0001\x001C\x0001\x001D\x0001\x0000\x0001\x0030\x0001\x0000\x0001\x0009" + "\x0002\x000D\x0001\x000E\x0001\x001D\x0004\x0000\x0001\x001C\x0001\x001D\x0001\x0000\x0001\x002F" + "\x0001\x0000\x0001\x0009\x0002\x000A\x0001\x000B\x0001\x001E\x0004\x0000\x0002\x001F\x0001\x0000" + "\x0001\x000A\x0001\x0000\x0001\x0009\x0002\x000A\x0001\x000B\x0001\x001F\x0004\x0000\x0001\x001F" + "\x0001\x0020\x0001\x0000\x0001\x000D\x0001\x0000\x0001\x0009\x0002\x000D\x0001\x000E\x0001\x0020" + "\x0004\x0000\x0001\x001F\x0001\x0020\x0001\x0000\x0001\x000A\x0001\x0000\x0001\x0009\x0002\x000A" + "\x0001\x000B\x0001\x0021\x0004\x0000\x0002\x0022\x0001\x0000\x0001\x000B\x0002\x0000\x0003\x000B" + "\x0001\x0022\x0004\x0000\x0001\x0022\x0001\x0023\x0001\x0000\x0001\x000E\x0002\x0000\x0003\x000E" + "\x0001\x0023\x0004\x0000\x0001\x0022\x0001\x0023\x0001\x0000\x0001\x000B\x0002\x0000\x0003\x000B" + "\x0001\x0024\x0006\x0000\x0001\x000F\x0006\x0000\x0001\x0025\x0004\x0000\x0001\x0014\x0001\x0015" + "\x0001\x0000\x0001\x0031\x0001\x0000\x0001\x0009\x0002\x002A\x0001\x0000\x0001\x0016\x0004\x0000" + "\x0002\x0017\x0001\x0000\x0001\x002E\x0002\x0000\x0001\x002E\x0002\x0000\x0001\x0028\x0004\x0000" + "\x0002\x0014\x0007\x0000\x0001\x0014\x0004\x0000\x0002\x0018\x0007\x0000\x0001\x0018\x0004\x0000" + "\x0002\x001C\x0007\x0000\x0001\x001C\x0004\x0000\x0002\x001F\x0007\x0000\x0001\x001F\x0004\x0000" + "\x0002\x0022\x0007\x0000\x0001\x0022\x0004\x0000\x0002\x0032\x0007\x0000\x0001\x0032\x0004\x0000" + "\x0002\x0014\x0007\x0000\x0001\x0033\x0004\x0000\x0002\x0032\x0001\x0000\x0001\x002E\x0002\x0000" + "\x0001\x002E\x0002\x0000\x0001\x0032\x0004\x0000\x0002\x0014\x0001\x0000\x0001\x0031\x0001\x0000" + 
+                       "\x0001\x0009\x0002\x002A\x0001\x0000\x0001\x0014\x0003\x0000";
+               
+               private static int[] ZzUnpackTrans()
+               {
+                       int[] result = new int[658];
+                       int offset = 0;
+                       offset = ZzUnpackTrans(ZZ_TRANS_PACKED_0, offset, result);
+                       return result;
+               }
+               
+               private static int ZzUnpackTrans(System.String packed, int offset, int[] result)
+               {
+                       int i = 0; /* index in packed string  */
+                       int j = offset; /* index in unpacked array */
+                       int l = packed.Length;
+                       while (i < l)
+                       {
+                               int count = packed[i++];
+                               int value_Renamed = packed[i++];
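+                               // Transition targets are stored off-by-one in the packed
+                               // string (presumably so that -1, "no transition", survives
+                               // the char encoding); the decrement restores the real value.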
+                               value_Renamed--;
+                               do 
+                                       result[j++] = value_Renamed;
+                               while (--count > 0);
+                       }
+                       return j;
+               }
+               
+               
+               /* error codes */
+               private const int ZZ_UNKNOWN_ERROR = 0;
+               private const int ZZ_NO_MATCH = 1;
+               private const int ZZ_PUSHBACK_2BIG = 2;
+               
+               /* error messages for the codes above */
+               private static readonly System.String[] ZZ_ERROR_MSG = new System.String[]{"Unknown internal scanner error", "Error: could not match input", "Error: pushback value was too large"};
+               
+               /// <summary> ZZ_ATTRIBUTE[aState] contains the attributes of state <code>aState</code></summary>
+               private static readonly int[] ZZ_ATTRIBUTE = ZzUnpackAttribute();
+               
+               private const System.String ZZ_ATTRIBUTE_PACKED_0 = "\x0001\x0000\x0001\x0009\x0003\x0001\x0001\x0009\x0001\x0001\x000B\x0000\x0004\x0001\x0002\x0000" + "\x0001\x0001\x0001\x0000\x000F\x0001\x0001\x0000\x0001\x0001\x0003\x0000\x0005\x0001";
+               
+               private static int[] ZzUnpackAttribute()
+               {
+                       int[] result = new int[51];
+                       int offset = 0;
+                       offset = ZzUnpackAttribute(ZZ_ATTRIBUTE_PACKED_0, offset, result);
+                       return result;
+               }
+               
+               private static int ZzUnpackAttribute(System.String packed, int offset, int[] result)
+               {
+                       int i = 0; /* index in packed string  */
+                       int j = offset; /* index in unpacked array */
+                       int l = packed.Length;
+                       while (i < l)
+                       {
+                               int count = packed[i++];
+                               int value_Renamed = packed[i++];
+                               do 
+                                       result[j++] = value_Renamed;
+                               while (--count > 0);
+                       }
+                       return j;
+               }
+               
+               /// <summary>the input device </summary>
+               private System.IO.TextReader zzReader;
+               
+               /// <summary>the current state of the DFA </summary>
+               private int zzState;
+               
+               /// <summary>the current lexical state </summary>
+               private int zzLexicalState = YYINITIAL;
+               
+               /// <summary>this buffer contains the current text to be matched and is
+               /// the source of the yytext() string 
+               /// </summary>
+               private char[] zzBuffer = new char[ZZ_BUFFERSIZE];
+               
+               /// <summary>the text position at the last accepting state </summary>
+               private int zzMarkedPos;
+               
+               /// <summary>the text position at the last state to be included in yytext </summary>
+               private int zzPushbackPos;
+               
+               /// <summary>the current text position in the buffer </summary>
+               private int zzCurrentPos;
+               
+               /// <summary>startRead marks the beginning of the yytext() string in the buffer </summary>
+               private int zzStartRead;
+               
+               /// <summary>endRead marks the last character in the buffer, that has been read
+               /// from input 
+               /// </summary>
+               private int zzEndRead;
+               
+               /// <summary>number of newlines encountered up to the start of the matched text </summary>
+               private int yyline;
+               
+               /// <summary>the number of characters up to the start of the matched text </summary>
+               private int yychar;
+               
+               /// <summary> the number of characters from the last newline up to the start of the 
+               /// matched text
+               /// </summary>
+               private int yycolumn;
+
+        /// <summary> zzAtBOL == true &lt;=&gt; the scanner is currently at the beginning of a line</summary>
+               private bool zzAtBOL = true;
+
+        /// <summary>zzAtEOF == true &lt;=&gt; the scanner is at the EOF </summary>
+               private bool zzAtEOF;
+               
+               /* user code: */
+               
+               public static readonly int ALPHANUM;
+               public static readonly int APOSTROPHE;
+               public static readonly int ACRONYM;
+               public static readonly int COMPANY;
+               public static readonly int EMAIL;
+               public static readonly int HOST;
+               public static readonly int NUM;
+               public static readonly int CJ;
+               /// <deprecated> this solves a bug where HOSTs that end with '.' are identified
+               /// as ACRONYMs. It is deprecated and will be removed in the next
+               /// release.
+               /// </deprecated>
+        [Obsolete("this solves a bug where HOSTs that end with '.' are identified as ACRONYMs. It is deprecated and will be removed in the next release.")]
+               public static readonly int ACRONYM_DEP;
+               
+               public static readonly System.String[] TOKEN_TYPES;
+               
+               public int Yychar()
+               {
+                       return yychar;
+               }
+
+        /// <summary> Resets the tokenizer to a new reader.</summary>
+        internal void Reset(System.IO.TextReader r)
+        {
+            // reset to default buffer size, if buffer has grown
+            if (zzBuffer.Length > ZZ_BUFFERSIZE)
+            {
+                zzBuffer = new char[ZZ_BUFFERSIZE];
+            }
+            Yyreset(r);
+        }
+               
+               /// <summary> Fills Lucene token with the current token text.</summary>
+               internal void  GetText(Token t)
+               {
+                       t.SetTermBuffer(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
+               }
+               
+               /// <summary> Fills TermAttribute with the current token text.</summary>
+               internal void  GetText(TermAttribute t)
+               {
+                       t.SetTermBuffer(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
+               }
+               
+               
+               /// <summary> Creates a new scanner.
+               /// There is also a java.io.InputStream version of this constructor.
+               /// 
+               /// </summary>
+               /// <param name="in_Renamed"> the java.io.Reader to read input from.
+               /// </param>
+               internal StandardTokenizerImpl(System.IO.TextReader in_Renamed)
+               {
+                       this.zzReader = in_Renamed;
+               }
+               
+               /// <summary> Creates a new scanner.
+               /// There is also a java.io.Reader version of this constructor.
+               /// 
+               /// </summary>
+               /// <param name="in_Renamed"> the java.io.InputStream to read input from.
+               /// </param>
+               internal StandardTokenizerImpl(System.IO.Stream in_Renamed):this(new System.IO.StreamReader(in_Renamed, System.Text.Encoding.Default))
+               {
+               }
+               
+               /// <summary> Unpacks the compressed character translation table.
+               /// 
+               /// </summary>
+               /// <param name="packed">  the packed character translation table
+               /// </param>
+               /// <returns>         the unpacked character translation table
+               /// </returns>
+               private static char[] ZzUnpackCMap(System.String packed)
+               {
+                       char[] map = new char[0x10000];
+                       int i = 0; /* index in packed string  */
+                       int j = 0; /* index in unpacked array */
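+                       // The hard-coded 1154 should equal ZZ_CMAP_PACKED.Length; the
+                       // RLE pairs expand to exactly 0x10000 entries, one character
+                       // class per BMP code unit.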
+                       while (i < 1154)
+                       {
+                               int count = packed[i++];
+                               char value_Renamed = packed[i++];
+                               do 
+                                       map[j++] = value_Renamed;
+                               while (--count > 0);
+                       }
+                       return map;
+               }
+               
+               
+               /// <summary> Refills the input buffer.
+               /// 
+               /// </summary>
+               /// <returns>      <code>false</code> iff there was new input
+               /// (<code>true</code> signals that the end of the input was reached).
+               /// 
+               /// </returns>
+               /// <exception cref="java.io.IOException"> if any I/O-Error occurs
+               /// </exception>
+               private bool ZzRefill()
+               {
+                       
+                       /* first: make room (if you can) */
+                       if (zzStartRead > 0)
+                       {
+                               Array.Copy(zzBuffer, zzStartRead, zzBuffer, 0, zzEndRead - zzStartRead);
+                               
+                               /* translate stored positions */
+                               zzEndRead -= zzStartRead;
+                               zzCurrentPos -= zzStartRead;
+                               zzMarkedPos -= zzStartRead;
+                               zzPushbackPos -= zzStartRead;
+                               zzStartRead = 0;
+                       }
+                       
+                       /* is the buffer big enough? */
+                       if (zzCurrentPos >= zzBuffer.Length)
+                       {
+                               /* if not: blow it up */
+                               char[] newBuffer = new char[zzCurrentPos * 2];
+                               Array.Copy(zzBuffer, 0, newBuffer, 0, zzBuffer.Length);
+                               zzBuffer = newBuffer;
+                       }
+                       
+                       /* finally: fill the buffer with new input */
+                       int numRead = zzReader.Read(zzBuffer, zzEndRead, zzBuffer.Length - zzEndRead);
+                       
+                       if (numRead <= 0)
+                       {
+                               return true;
+                       }
+                       else
+                       {
+                               zzEndRead += numRead;
+                               return false;
+                       }
+               }
+               
+               
+               /// <summary> Closes the input stream.</summary>
+               public void  Yyclose()
+               {
+                       zzAtEOF = true; /* indicate end of file */
+                       zzEndRead = zzStartRead; /* invalidate buffer    */
+                       
+                       if (zzReader != null)
+                               zzReader.Close();
+               }
+               
+               
+               /// <summary> Resets the scanner to read from a new input stream.
+               /// Does not close the old reader.
+               /// 
+               /// All internal variables are reset, the old input stream 
+               /// <b>cannot</b> be reused (internal buffer is discarded and lost).
+               /// Lexical state is set to <tt>ZZ_INITIAL</tt>.
+               /// 
+               /// </summary>
+               /// <param name="reader">  the new input stream 
+               /// </param>
+               public void  Yyreset(System.IO.TextReader reader)
+               {
+                       zzReader = reader;
+                       zzAtBOL = true;
+                       zzAtEOF = false;
+                       zzEndRead = zzStartRead = 0;
+                       zzCurrentPos = zzMarkedPos = zzPushbackPos = 0;
+                       yyline = yychar = yycolumn = 0;
+                       zzLexicalState = YYINITIAL;
+               }
+               
+               
+               /// <summary> Returns the current lexical state.</summary>
+               public int Yystate()
+               {
+                       return zzLexicalState;
+               }
+               
+               
+               /// <summary> Enters a new lexical state
+               /// 
+               /// </summary>
+               /// <param name="newState">the new lexical state
+               /// </param>
+               public void  Yybegin(int newState)
+               {
+                       zzLexicalState = newState;
+               }
+               
+               
+               /// <summary> Returns the text matched by the current regular expression.</summary>
+               public System.String Yytext()
+               {
+                       return new System.String(zzBuffer, zzStartRead, zzMarkedPos - zzStartRead);
+               }
+               
+               
+               /// <summary> Returns the character at position <tt>pos</tt> from the 
+               /// matched text. 
+               /// 
+               /// It is equivalent to yytext().charAt(pos), but faster
+               /// 
+               /// </summary>
+               /// <param name="pos">the position of the character to fetch. 
+               /// A value from 0 to yylength()-1.
+               /// 
+               /// </param>
+               /// <returns> the character at position pos
+               /// </returns>
+               public char Yycharat(int pos)
+               {
+                       return zzBuffer[zzStartRead + pos];
+               }
+               
+               
+               /// <summary> Returns the length of the matched text region.</summary>
+               public int Yylength()
+               {
+                       return zzMarkedPos - zzStartRead;
+               }
+               
+               
+               /// <summary> Reports an error that occurred while scanning.
+               /// 
+               /// In a well-formed scanner (no or only correct usage of 
+               /// yypushback(int) and a match-all fallback rule) this method 
+               /// will only be called with things that "Can't Possibly Happen".
+               /// If this method is called, something is seriously wrong
+               /// (e.g. a JFlex bug producing a faulty scanner etc.).
+               /// 
+               /// Usual syntax/scanner level error handling should be done
+               /// in error fallback rules.
+               /// 
+               /// </summary>
+               /// <param name="errorCode"> the code of the error message to display
+               /// </param>
+               private void  ZzScanError(int errorCode)
+               {
+                       System.String message;
+                       try
+                       {
+                               message = ZZ_ERROR_MSG[errorCode];
+                       }
+                       catch (System.IndexOutOfRangeException)
+                       {
+                               message = ZZ_ERROR_MSG[ZZ_UNKNOWN_ERROR];
+                       }
+                       
+                       throw new System.ApplicationException(message);
+               }
+               
+               
+               /// <summary> Pushes the specified number of characters back into the input stream.
+               /// 
+               /// They will be read again by the next call of the scanning method.
+               /// 
+               /// </summary>
+               /// <param name="number"> the number of characters to be read again.
+               /// This number must not be greater than yylength()!
+               /// </param>
+               public virtual void  Yypushback(int number)
+               {
+                       if (number > Yylength())
+                               ZzScanError(ZZ_PUSHBACK_2BIG);
+                       
+                       zzMarkedPos -= number;
+               }
+               
+               
+               /// <summary> Resumes scanning until the next regular expression is matched,
+               /// the end of input is encountered or an I/O-Error occurs.
+               /// 
+               /// </summary>
+               /// <returns>      the next token
+               /// </returns>
+               /// <exception cref="java.io.IOException"> if any I/O-Error occurs
+               /// </exception>
+               public virtual int GetNextToken()
+               {
+                       int zzInput;
+                       int zzAction;
+                       
+                       // cached fields:
+                       int zzCurrentPosL;
+                       int zzMarkedPosL;
+                       int zzEndReadL = zzEndRead;
+                       char[] zzBufferL = zzBuffer;
+                       char[] zzCMapL = ZZ_CMAP;
+                       
+                       int[] zzTransL = ZZ_TRANS;
+                       int[] zzRowMapL = ZZ_ROWMAP;
+                       int[] zzAttrL = ZZ_ATTRIBUTE;
+                       
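+                       // Outer loop: one iteration per token. The inner loop steps the
+                       // DFA one input character at a time, recording the last accepting
+                       // state in zzAction so that the longest match wins.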
+                       while (true)
+                       {
+                               zzMarkedPosL = zzMarkedPos;
+                               
+                               yychar += zzMarkedPosL - zzStartRead;
+                               
+                               zzAction = - 1;
+                               
+                               zzCurrentPosL = zzCurrentPos = zzStartRead = zzMarkedPosL;
+                               
+                               zzState = zzLexicalState;
+                               
+                               
+                               {
+                                       while (true)
+                                       {
+                                               
+                                               if (zzCurrentPosL < zzEndReadL)
+                                                       zzInput = zzBufferL[zzCurrentPosL++];
+                                               else if (zzAtEOF)
+                                               {
+                                                       zzInput = YYEOF;
+                                                       goto zzForAction_brk;   // {{Aroush-2.9}} this 'goto' may be in the wrong place
+                                               }
+                                               else
+                                               {
+                                                       // store back cached positions
+                                                       zzCurrentPos = zzCurrentPosL;
+                                                       zzMarkedPos = zzMarkedPosL;
+                                                       bool eof = ZzRefill();
+                                                       // get translated positions and possibly new buffer
+                                                       zzCurrentPosL = zzCurrentPos;
+                                                       zzMarkedPosL = zzMarkedPos;
+                                                       zzBufferL = zzBuffer;
+                                                       zzEndReadL = zzEndRead;
+                                                       if (eof)
+                                                       {
+                                                               zzInput = YYEOF;
+                                                               goto zzForAction_brk;   // {{Aroush-2.9}} this 'goto' may be in the wrong place
+                                                       }
+                                                       else
+                                                       {
+                                                               zzInput = zzBufferL[zzCurrentPosL++];
+                                                       }
+                                               }
+                                               int zzNext = zzTransL[zzRowMapL[zzState] + zzCMapL[zzInput]];
+                                               if (zzNext == - 1)
+                                               {
+                                                       goto zzForAction_brk;   // {{Aroush-2.9}} this 'goto' may be in the wrong place
+                                               }
+                                               zzState = zzNext;
+                                               
+                                               int zzAttributes = zzAttrL[zzState];
+                                               if ((zzAttributes & 1) == 1)
+                                               {
+                                                       zzAction = zzState;
+                                                       zzMarkedPosL = zzCurrentPosL;
+                                                       if ((zzAttributes & 8) == 8)
+                                                       {
+                                                               goto zzForAction_brk;   // {{Aroush-2.9}} this 'goto' may be in the wrong place
+                                                       }
+                                               }
+                                       }
+                               }
+
+zzForAction_brk: ;  // {{Aroush-2.9}} this 'label' may be in the wrong place
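+                               // (The goto/label pair emulates Java's labeled
+                               // "break zzForAction"; C# has no labeled break, so the
+                               // port jumps out of the inner scan loop instead.)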
+                               
+                               
+                               // store back cached position
+                               zzMarkedPos = zzMarkedPosL;
+                               
+                               switch (zzAction < 0 ? zzAction : ZZ_ACTION[zzAction])
+                               {
+                                       
+                                       case 4: 
+                                       {
+                                               return HOST;
+                                       }
+                                       
+                                       case 11:  break;
+                                       
+                                       case 9: 
+                                       {
+                                               return ACRONYM;
+                                       }
+                                       
+                                       case 12:  break;
+                                       
+                                       case 8: 
+                                       {
+                                               return ACRONYM_DEP;
+                                       }
+                                       
+                                       case 13:  break;
+                                       
+                                       case 1: 
+                                               {
+                                                       /* ignore */
+                                               }
+                                               goto case 14;
+                                       
+                                       case 14:  break;
+                                       
+                                       case 5: 
+                                       {
+                                               return NUM;
+                                       }
+                                       
+                                       case 15:  break;
+                                       
+                                       case 3: 
+                                       {
+                                               return CJ;
+                                       }
+                                       
+                                       case 16:  break;
+                                       
+                                       case 2: 
+                                       {
+                                               return ALPHANUM;
+                                       }
+                                       
+                                       case 17:  break;
+                                       
+                                       case 7: 
+                                       {
+                                               return COMPANY;
+                                       }
+                                       
+                                       case 18:  break;
+                                       
+                                       case 6: 
+                                       {
+                                               return APOSTROPHE;
+                                       }
+                                       
+                                       case 19:  break;
+                                       
+                                       case 10: 
+                                       {
+                                               return EMAIL;
+                                       }
+                                       
+                                       case 20:  break;
+                                       
+                                       default: 
+                                               if (zzInput == YYEOF && zzStartRead == zzCurrentPos)
+                                               {
+                                                       zzAtEOF = true;
+                                                       return YYEOF;
+                                               }
+                                               else
+                                               {
+                                                       ZzScanError(ZZ_NO_MATCH);
+                                               }
+                                               break;
+                                       
+                               }
+                       }
+               }
+               static StandardTokenizerImpl()
+               {
+                       ALPHANUM = StandardTokenizer.ALPHANUM;
+                       APOSTROPHE = StandardTokenizer.APOSTROPHE;
+                       ACRONYM = StandardTokenizer.ACRONYM;
+                       COMPANY = StandardTokenizer.COMPANY;
+                       EMAIL = StandardTokenizer.EMAIL;
+                       HOST = StandardTokenizer.HOST;
+                       NUM = StandardTokenizer.NUM;
+                       CJ = StandardTokenizer.CJ;
+                       ACRONYM_DEP = StandardTokenizer.ACRONYM_DEP;
+                       TOKEN_TYPES = StandardTokenizer.TOKEN_TYPES;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/StandardTokenizerImpl.jflex b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Standard/StandardTokenizerImpl.jflex
new file mode 100644 (file)
index 0000000..dbc92fd
--- /dev/null
@@ -0,0 +1,156 @@
+package org.apache.lucene.analysis.standard;\r
+\r
+/**\r
+ * Licensed to the Apache Software Foundation (ASF) under one or more\r
+ * contributor license agreements.  See the NOTICE file distributed with\r
+ * this work for additional information regarding copyright ownership.\r
+ * The ASF licenses this file to You under the Apache License, Version 2.0\r
+ * (the "License"); you may not use this file except in compliance with\r
+ * the License.  You may obtain a copy of the License at\r
+ *\r
+ *     http://www.apache.org/licenses/LICENSE-2.0\r
+ *\r
+ * Unless required by applicable law or agreed to in writing, software\r
+ * distributed under the License is distributed on an "AS IS" BASIS,\r
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ * See the License for the specific language governing permissions and\r
+ * limitations under the License.\r
+ */\r
+\r
+/*\r
+\r
+NOTE: if you change StandardTokenizerImpl.jflex and need to regenerate\r
+      the tokenizer, remember to use JRE 1.4 to run jflex (before\r
+      Lucene 3.0).  This grammar now uses constructs (e.g. :digit:,\r
+      :letter:) whose meaning can vary according to the JRE used to\r
+      run jflex.  See\r
+      https://issues.apache.org/jira/browse/LUCENE-1126 for details.\r
+\r
+*/\r
+\r
+import org.apache.lucene.analysis.Token;\r
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;\r
+\r
+%%\r
+\r
+%class StandardTokenizerImpl\r
+%unicode\r
+%integer\r
+%function getNextToken\r
+%pack\r
+%char\r
+\r
+%{\r
+\r
+public static final int ALPHANUM          = StandardTokenizer.ALPHANUM;\r
+public static final int APOSTROPHE        = StandardTokenizer.APOSTROPHE;\r
+public static final int ACRONYM           = StandardTokenizer.ACRONYM;\r
+public static final int COMPANY           = StandardTokenizer.COMPANY;\r
+public static final int EMAIL             = StandardTokenizer.EMAIL;\r
+public static final int HOST              = StandardTokenizer.HOST;\r
+public static final int NUM               = StandardTokenizer.NUM;\r
+public static final int CJ                = StandardTokenizer.CJ;\r
+/**\r
+ * @deprecated this solves a bug where HOSTs that end with '.' are identified\r
+ *             as ACRONYMs. It is deprecated and will be removed in the next\r
+ *             release.\r
+ */\r
+public static final int ACRONYM_DEP       = StandardTokenizer.ACRONYM_DEP;\r
+\r
+public static final String [] TOKEN_TYPES = StandardTokenizer.TOKEN_TYPES;\r
+\r
+public final int yychar()\r
+{\r
+    return yychar;\r
+}\r
+\r
+/**\r
+ * Resets the Tokenizer to a new Reader.\r
+ */\r
+final void reset(java.io.Reader r) {\r
+  // reset to default buffer size, if buffer has grown\r
+  if (zzBuffer.length > ZZ_BUFFERSIZE) {\r
+    zzBuffer = new char[ZZ_BUFFERSIZE];\r
+  }\r
+  yyreset(r);\r
+}\r
+\r
+/**\r
+ * Fills Lucene token with the current token text.\r
+ */\r
+final void getText(Token t) {\r
+  t.setTermBuffer(zzBuffer, zzStartRead, zzMarkedPos-zzStartRead);\r
+}\r
+\r
+/**\r
+ * Fills TermAttribute with the current token text.\r
+ */\r
+final void getText(TermAttribute t) {\r
+  t.setTermBuffer(zzBuffer, zzStartRead, zzMarkedPos-zzStartRead);\r
+}\r
+\r
+%}\r
+\r
+THAI       = [\u0E00-\u0E59]\r
+\r
+// basic word: a sequence of digits & letters (includes Thai to enable ThaiAnalyzer to function)\r
+ALPHANUM   = ({LETTER}|{THAI}|[:digit:])+\r
+\r
+// internal apostrophes: O'Reilly, you're, O'Reilly's\r
+// use a post-filter to remove possessives\r
+APOSTROPHE =  {ALPHA} ("'" {ALPHA})+\r
+\r
+// acronyms: U.S.A., I.B.M., etc.\r
+// use a post-filter to remove dots\r
+ACRONYM    =  {LETTER} "." ({LETTER} ".")+\r
+\r
+ACRONYM_DEP    = {ALPHANUM} "." ({ALPHANUM} ".")+\r
+\r
+// company names like AT&T and Excite@Home.\r
+COMPANY    =  {ALPHA} ("&"|"@") {ALPHA}\r
+\r
+// email addresses\r
+EMAIL      =  {ALPHANUM} (("."|"-"|"_") {ALPHANUM})* "@" {ALPHANUM} (("."|"-") {ALPHANUM})+\r
+\r
+// hostname\r
+HOST       =  {ALPHANUM} ((".") {ALPHANUM})+\r
+\r
+// floating point, serial, model numbers, ip addresses, etc.\r
+// every other segment must have at least one digit\r
+NUM        = ({ALPHANUM} {P} {HAS_DIGIT}\r
+           | {HAS_DIGIT} {P} {ALPHANUM}\r
+           | {ALPHANUM} ({P} {HAS_DIGIT} {P} {ALPHANUM})+\r
+           | {HAS_DIGIT} ({P} {ALPHANUM} {P} {HAS_DIGIT})+\r
+           | {ALPHANUM} {P} {HAS_DIGIT} ({P} {ALPHANUM} {P} {HAS_DIGIT})+\r
+           | {HAS_DIGIT} {P} {ALPHANUM} ({P} {HAS_DIGIT} {P} {ALPHANUM})+)\r
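+// e.g. (illustrative) "1-2" and "mp3-player" are NUMs; dotted all-digit forms\r
+// like "1.2.3" also match {HOST}, which is listed first among the rules below\r
+// and therefore wins on equal-length matches.\r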
+\r
+// punctuation\r
+P               = ("_"|"-"|"/"|"."|",")\r
+\r
+// at least one digit\r
+HAS_DIGIT  = ({LETTER}|[:digit:])* [:digit:] ({LETTER}|[:digit:])*\r
+\r
+ALPHA      = ({LETTER})+\r
+\r
+// From the JFlex manual: "the expression that matches everything of <a> not matched by <b> is !(!<a>|<b>)"\r
+LETTER     = !(![:letter:]|{CJ})\r
+\r
+// Chinese and Japanese (but NOT Korean, which is included in [:letter:])\r
+CJ         = [\u3100-\u312f\u3040-\u309F\u30A0-\u30FF\u31F0-\u31FF\u3300-\u337f\u3400-\u4dbf\u4e00-\u9fff\uf900-\ufaff\uff65-\uff9f]\r
+\r
+WHITESPACE = \r\n | [ \r\n\t\f]\r
+\r
+%%\r
+\r
+{ALPHANUM}                                                     { return ALPHANUM; }\r
+{APOSTROPHE}                                                   { return APOSTROPHE; }\r
+{ACRONYM}                                                      { return ACRONYM; }\r
+{COMPANY}                                                      { return COMPANY; }\r
+{EMAIL}                                                        { return EMAIL; }\r
+{HOST}                                                         { return HOST; }\r
+{NUM}                                                          { return NUM; }\r
+{CJ}                                                           { return CJ; }\r
+{ACRONYM_DEP}                                                  { return ACRONYM_DEP; }\r
+\r
+/** Ignore the rest */\r
+. | {WHITESPACE}                                               { /* ignore */ }\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/StopAnalyzer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/StopAnalyzer.cs
new file mode 100644 (file)
index 0000000..37c8cfe
--- /dev/null
@@ -0,0 +1,319 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Version = Mono.Lucene.Net.Util.Version;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> Filters {@link LetterTokenizer} with {@link LowerCaseFilter} and
+       /// {@link StopFilter}.
+       /// 
+       /// <a name="version"/>
+       /// <p/>
+       /// You must specify the required {@link Version} compatibility when creating
+       /// StopAnalyzer:
+       /// <ul>
+       /// <li>As of 2.9, position increments are preserved</li>
+       /// </ul>
+       /// </summary>
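+       /// <remarks> A minimal usage sketch (assuming the usual Lucene 2.9 surface
+       /// as ported here, i.e. that <c>Version.LUCENE_29</c> and
+       /// <c>Analyzer.TokenStream(string, System.IO.TextReader)</c> exist):
+       /// <code>
+       /// Analyzer a = new StopAnalyzer(Mono.Lucene.Net.Util.Version.LUCENE_29);
+       /// // "the" is removed as a stop word; "quick", "brown" and "fox" survive.
+       /// TokenStream ts = a.TokenStream("body", new System.IO.StringReader("the quick brown fox"));
+       /// </code>
+       /// </remarks>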
+       
+       public sealed class StopAnalyzer:Analyzer
+       {
+               private System.Collections.Hashtable stopWords;
+               // @deprecated
+        [Obsolete]
+               private bool useDefaultStopPositionIncrement;
+               private bool enablePositionIncrements;
+               
+               /// <summary>An array containing some common English words that are not usually useful
+               /// for searching. 
+               /// </summary>
+               /// <deprecated> Use {@link #ENGLISH_STOP_WORDS_SET} instead 
+               /// </deprecated>
+        [Obsolete("Use ENGLISH_STOP_WORDS_SET instead ")]
+               public static readonly System.String[] ENGLISH_STOP_WORDS = new System.String[]{"a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"};
+               
+               /// <summary>An unmodifiable set containing some common English words that are not usually useful
+               /// for searching.
+               /// </summary>
+               public static System.Collections.Hashtable ENGLISH_STOP_WORDS_SET;
+               
+               /// <summary>Builds an analyzer which removes words in
+               /// ENGLISH_STOP_WORDS.
+               /// </summary>
+               /// <deprecated> Use {@link #StopAnalyzer(Version)} instead
+               /// </deprecated>
+        [Obsolete("Use StopAnalyzer(Version) instead")]
+               public StopAnalyzer()
+               {
+                       stopWords = ENGLISH_STOP_WORDS_SET;
+                       useDefaultStopPositionIncrement = true;
+                       enablePositionIncrements = false;
+               }
+               
+               /// <summary> Builds an analyzer which removes words in ENGLISH_STOP_WORDS.</summary>
+               public StopAnalyzer(Version matchVersion)
+               {
+                       stopWords = ENGLISH_STOP_WORDS_SET;
+                       useDefaultStopPositionIncrement = false;
+                       enablePositionIncrements = StopFilter.GetEnablePositionIncrementsVersionDefault(matchVersion);
+               }
+               
+               /// <summary>Builds an analyzer which removes words in
+               /// ENGLISH_STOP_WORDS.
+               /// </summary>
+               /// <param name="enablePositionIncrements">
+               /// See {@link StopFilter#SetEnablePositionIncrements}
+               /// </param>
+               /// <deprecated> Use {@link #StopAnalyzer(Version)} instead
+               /// </deprecated>
+        [Obsolete("Use StopAnalyzer(Version) instead")]
+               public StopAnalyzer(bool enablePositionIncrements)
+               {
+                       stopWords = ENGLISH_STOP_WORDS_SET;
+                       this.enablePositionIncrements = enablePositionIncrements;
+                       useDefaultStopPositionIncrement = false;
+               }
+               
+               /// <summary>Builds an analyzer with the stop words from the given set.</summary>
+               /// <deprecated> Use {@link #StopAnalyzer(Version, Set)} instead
+               /// </deprecated>
+        [Obsolete("Use StopAnalyzer(Version, Set) instead")]
+               public StopAnalyzer(System.Collections.Hashtable stopWords)
+               {
+                       this.stopWords = stopWords;
+                       useDefaultStopPositionIncrement = true;
+                       enablePositionIncrements = false;
+               }
+               
+               /// <summary>Builds an analyzer with the stop words from the given set.</summary>
+               public StopAnalyzer(Version matchVersion, System.Collections.Hashtable stopWords)
+               {
+                       this.stopWords = stopWords;
+                       useDefaultStopPositionIncrement = false;
+                       enablePositionIncrements = StopFilter.GetEnablePositionIncrementsVersionDefault(matchVersion);
+               }
+               
+               /// <summary>Builds an analyzer with the stop words from the given set.</summary>
+               /// <param name="stopWords">Set of stop words
+               /// </param>
+               /// <param name="enablePositionIncrements">
+               /// See {@link StopFilter#SetEnablePositionIncrements}
+               /// </param>
+               /// <deprecated> Use {@link #StopAnalyzer(Version, Set)} instead
+               /// </deprecated>
+        [Obsolete("Use StopAnalyzer(Version, Set) instead")]
+               public StopAnalyzer(System.Collections.Hashtable stopWords, bool enablePositionIncrements)
+               {
+                       this.stopWords = stopWords;
+                       this.enablePositionIncrements = enablePositionIncrements;
+                       useDefaultStopPositionIncrement = false;
+               }
+               
+               /// <summary>Builds an analyzer which removes words in the provided array.</summary>
+               /// <deprecated> Use {@link #StopAnalyzer(Set, boolean)} instead 
+               /// </deprecated>
+               /// <deprecated> Use {@link #StopAnalyzer(Version, Set)} instead
+               /// </deprecated>
+        [Obsolete("Use StopAnalyzer(Set, boolean) or StopAnalyzer(Version, Set) instead ")]
+               public StopAnalyzer(System.String[] stopWords)
+               {
+                       this.stopWords = StopFilter.MakeStopSet(stopWords);
+                       useDefaultStopPositionIncrement = true;
+                       enablePositionIncrements = false;
+               }
+               
+               /// <summary>Builds an analyzer which removes words in the provided array.</summary>
+               /// <param name="stopWords">Array of stop words
+               /// </param>
+               /// <param name="enablePositionIncrements">
+               /// See {@link StopFilter#SetEnablePositionIncrements}
+               /// </param>
+               /// <deprecated> Use {@link #StopAnalyzer(Version, Set)} instead
+               /// </deprecated>
+        [Obsolete("Use StopAnalyzer(Version, Set) instead")]
+               public StopAnalyzer(System.String[] stopWords, bool enablePositionIncrements)
+               {
+                       this.stopWords = StopFilter.MakeStopSet(stopWords);
+                       this.enablePositionIncrements = enablePositionIncrements;
+                       useDefaultStopPositionIncrement = false;
+               }
+               
+               /// <summary>Builds an analyzer with the stop words from the given file.</summary>
+               /// <seealso cref="WordlistLoader.GetWordSet(File)">
+               /// </seealso>
+               /// <deprecated> Use {@link #StopAnalyzer(Version, File)} instead
+               /// </deprecated>
+        [Obsolete("Use StopAnalyzer(Version, File) instead")]
+               public StopAnalyzer(System.IO.FileInfo stopwordsFile)
+               {
+                       stopWords = WordlistLoader.GetWordSet(stopwordsFile);
+                       useDefaultStopPositionIncrement = true;
+                       enablePositionIncrements = false;
+               }
+               
+               /// <summary>Builds an analyzer with the stop words from the given file.</summary>
+               /// <seealso cref="WordlistLoader.GetWordSet(File)">
+               /// </seealso>
+               /// <param name="stopwordsFile">File to load stop words from
+               /// </param>
+               /// <param name="enablePositionIncrements">
+               /// See {@link StopFilter#SetEnablePositionIncrements}
+               /// </param>
+               /// <deprecated> Use {@link #StopAnalyzer(Version, File)} instead
+               /// </deprecated>
+        [Obsolete("Use StopAnalyzer(Version, File) instead")]
+               public StopAnalyzer(System.IO.FileInfo stopwordsFile, bool enablePositionIncrements)
+               {
+                       stopWords = WordlistLoader.GetWordSet(stopwordsFile);
+                       this.enablePositionIncrements = enablePositionIncrements;
+                       useDefaultStopPositionIncrement = false;
+               }
+               
+               /// <summary> Builds an analyzer with the stop words from the given file.
+               /// 
+               /// </summary>
+               /// <seealso cref="WordlistLoader.GetWordSet(File)">
+               /// </seealso>
+               /// <param name="matchVersion">See <a href="#version">above</a>
+               /// </param>
+               /// <param name="stopwordsFile">File to load stop words from
+               /// </param>
+               public StopAnalyzer(Version matchVersion, System.IO.FileInfo stopwordsFile)
+               {
+                       stopWords = WordlistLoader.GetWordSet(stopwordsFile);
+                       this.enablePositionIncrements = StopFilter.GetEnablePositionIncrementsVersionDefault(matchVersion);
+                       useDefaultStopPositionIncrement = false;
+               }
+               
+               /// <summary>Builds an analyzer with the stop words from the given reader.</summary>
+               /// <seealso cref="WordlistLoader.GetWordSet(Reader)">
+               /// </seealso>
+               /// <deprecated> Use {@link #StopAnalyzer(Version, Reader)} instead
+               /// </deprecated>
+        [Obsolete("Use StopAnalyzer(Version, Reader) instead")]
+               public StopAnalyzer(System.IO.TextReader stopwords)
+               {
+                       stopWords = WordlistLoader.GetWordSet(stopwords);
+                       useDefaultStopPositionIncrement = true;
+                       enablePositionIncrements = false;
+               }
+               
+               /// <summary>Builds an analyzer with the stop words from the given reader.</summary>
+               /// <seealso cref="WordlistLoader.GetWordSet(Reader)">
+               /// </seealso>
+               /// <param name="stopwords">Reader to load stop words from
+               /// </param>
+               /// <param name="enablePositionIncrements">
+               /// See {@link StopFilter#SetEnablePositionIncrements}
+               /// </param>
+               /// <deprecated> Use {@link #StopAnalyzer(Version, Reader)} instead
+               /// </deprecated>
+        [Obsolete("Use StopAnalyzer(Version, Reader) instead")]
+               public StopAnalyzer(System.IO.TextReader stopwords, bool enablePositionIncrements)
+               {
+                       stopWords = WordlistLoader.GetWordSet(stopwords);
+                       this.enablePositionIncrements = enablePositionIncrements;
+                       useDefaultStopPositionIncrement = false;
+               }
+
+        /// <summary>Builds an analyzer with the stop words from the given reader. </summary>
+        /// <seealso cref="WordlistLoader.GetWordSet(Reader)">
+        /// </seealso>
+        /// <param name="matchVersion">See <a href="#version">above</a>
+        /// </param>
+        /// <param name="stopwords">Reader to load stop words from
+        /// </param>
+        public StopAnalyzer(Version matchVersion, System.IO.TextReader stopwords)
+        {
+            stopWords = WordlistLoader.GetWordSet(stopwords);
+            this.enablePositionIncrements = StopFilter.GetEnablePositionIncrementsVersionDefault(matchVersion);
+            useDefaultStopPositionIncrement = false;
+        }
+
+        /// <summary>Filters LowerCaseTokenizer with StopFilter. </summary>
+               public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
+               {
+                       if (useDefaultStopPositionIncrement)
+                       {
+                               return new StopFilter(new LowerCaseTokenizer(reader), stopWords);
+                       }
+                       else
+                       {
+                               return new StopFilter(enablePositionIncrements, new LowerCaseTokenizer(reader), stopWords);
+                       }
+               }
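+
+               // A minimal usage sketch (editor's illustration, not part of the port);
+               // the field name "body" and the sample text are hypothetical:
+               //
+               //   Analyzer analyzer = new StopAnalyzer(Version.LUCENE_29);
+               //   TokenStream ts = analyzer.TokenStream("body", new System.IO.StringReader("the quick brown fox"));
+               //   TermAttribute term = (TermAttribute) ts.AddAttribute(typeof(TermAttribute));
+               //   while (ts.IncrementToken())
+               //       System.Console.WriteLine(term.Term()); // prints "quick", "brown", "fox"; "the" is stopped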
+               
+               /// <summary>Per-thread saved tokenizer/filter pair used by {@link #ReusableTokenStream}. </summary>
+               private class SavedStreams
+               {
+                       public SavedStreams(StopAnalyzer enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(StopAnalyzer enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private StopAnalyzer enclosingInstance;
+                       public StopAnalyzer Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal Tokenizer source;
+                       internal TokenStream result;
+               }
+               
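+               /// <summary>Reusable variant of {@link #TokenStream}: the tokenizer/filter
+               /// pair is cached per thread via GetPreviousTokenStream/SetPreviousTokenStream,
+               /// and the saved tokenizer is Reset for each new reader.</summary>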
+               public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
+               {
+                       SavedStreams streams = (SavedStreams) GetPreviousTokenStream();
+                       if (streams == null)
+                       {
+                               streams = new SavedStreams(this);
+                               streams.source = new LowerCaseTokenizer(reader);
+                               if (useDefaultStopPositionIncrement)
+                               {
+                                       streams.result = new StopFilter(streams.source, stopWords);
+                               }
+                               else
+                               {
+                                       streams.result = new StopFilter(enablePositionIncrements, streams.source, stopWords);
+                               }
+                               SetPreviousTokenStream(streams);
+                       }
+                       else
+                               streams.source.Reset(reader);
+                       return streams.result;
+               }
+               static StopAnalyzer()
+               {
+                       {
+                               System.String[] stopWords = new System.String[]{"a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"};
+                               CharArraySet stopSet = new CharArraySet(stopWords.Length, false);
+                               stopSet.AddAll(new System.Collections.ArrayList(stopWords));
+                               ENGLISH_STOP_WORDS_SET = CharArraySet.UnmodifiableSet(stopSet);
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/StopFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/StopFilter.cs
new file mode 100644 (file)
index 0000000..df1d956
--- /dev/null
@@ -0,0 +1,341 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using PositionIncrementAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.PositionIncrementAttribute;
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+using QueryParser = Mono.Lucene.Net.QueryParsers.QueryParser;
+using Version = Mono.Lucene.Net.Util.Version;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> Removes stop words from a token stream.</summary>
+       
+       public sealed class StopFilter:TokenFilter
+       {
+               
+               // deprecated
+        [Obsolete]
+               private static bool ENABLE_POSITION_INCREMENTS_DEFAULT = false;
+               
+               private CharArraySet stopWords;
+               private bool enablePositionIncrements = ENABLE_POSITION_INCREMENTS_DEFAULT;
+               
+               private TermAttribute termAtt;
+               private PositionIncrementAttribute posIncrAtt;
+               
+               /// <summary> Construct a token stream filtering the given input.</summary>
+               /// <deprecated> Use {@link #StopFilter(boolean, TokenStream, String[])} instead
+               /// </deprecated>
+        [Obsolete("Use StopFilter(bool, TokenStream, String[]) instead")]
+               public StopFilter(TokenStream input, System.String[] stopWords):this(ENABLE_POSITION_INCREMENTS_DEFAULT, input, stopWords, false)
+               {
+               }
+               
+               /// <summary> Construct a token stream filtering the given input.</summary>
+               /// <param name="enablePositionIncrements">true if token positions should record the removed stop words
+               /// </param>
+               /// <param name="input">input TokenStream
+               /// </param>
+               /// <param name="stopWords">array of stop words
+               /// </param>
+               /// <deprecated> Use {@link #StopFilter(boolean, TokenStream, Set)} instead.
+               /// </deprecated>
+        [Obsolete("Use StopFilter(bool, TokenStream, Hashtable) instead.")]
+               public StopFilter(bool enablePositionIncrements, TokenStream input, System.String[] stopWords):this(enablePositionIncrements, input, stopWords, false)
+               {
+               }
+               
+               /// <summary> Constructs a filter which removes words from the input
+               /// TokenStream that are named in the array of words.
+               /// </summary>
+               /// <deprecated> Use {@link #StopFilter(boolean, TokenStream, String[], boolean)} instead
+               /// </deprecated>
+        [Obsolete("Use StopFilter(bool, TokenStream, String[], bool) instead")]
+               public StopFilter(TokenStream in_Renamed, System.String[] stopWords, bool ignoreCase):this(ENABLE_POSITION_INCREMENTS_DEFAULT, in_Renamed, stopWords, ignoreCase)
+               {
+               }
+               
+               /// <summary> Constructs a filter which removes words from the input
+               /// TokenStream that are named in the array of words.
+               /// </summary>
+               /// <param name="enablePositionIncrements">true if token positions should record the removed stop words
+               /// </param>
+               /// <param name="in">input TokenStream
+               /// </param>
+               /// <param name="stopWords">array of stop words
+               /// </param>
+               /// <param name="ignoreCase">true if case is ignored
+               /// </param>
+               /// <deprecated> Use {@link #StopFilter(boolean, TokenStream, Set, boolean)} instead.
+               /// </deprecated>
+        [Obsolete("Use StopFilter(bool, TokenStream, Hashtable, bool) instead.")]
+               public StopFilter(bool enablePositionIncrements, TokenStream in_Renamed, System.String[] stopWords, bool ignoreCase):base(in_Renamed)
+               {
+                       this.stopWords = (CharArraySet) MakeStopSet(stopWords, ignoreCase);
+                       this.enablePositionIncrements = enablePositionIncrements;
+                       Init();
+               }
+               
+               
+               /// <summary> Construct a token stream filtering the given input.
+               /// If <code>stopWords</code> is an instance of {@link CharArraySet} (true if
+               /// <code>makeStopSet()</code> was used to construct the set) it will be directly used
+               /// and <code>ignoreCase</code> will be ignored since <code>CharArraySet</code>
+               /// directly controls case sensitivity.
+               /// <p/>
+               /// If <code>stopWords</code> is not an instance of {@link CharArraySet},
+               /// a new CharArraySet will be constructed and <code>ignoreCase</code> will be
+               /// used to specify the case sensitivity of that set.
+               /// 
+               /// </summary>
+               /// <param name="input">
+               /// </param>
+               /// <param name="stopWords">The set of Stop Words.
+               /// </param>
+               /// <param name="ignoreCase">Ignore case when stopping.
+               /// </param>
+               /// <deprecated> Use {@link #StopFilter(boolean, TokenStream, Set, boolean)} instead
+               /// </deprecated>
+        [Obsolete("Use StopFilter(bool, TokenStream, Set, bool) instead")]
+               public StopFilter(TokenStream input, System.Collections.Hashtable stopWords, bool ignoreCase):this(ENABLE_POSITION_INCREMENTS_DEFAULT, input, stopWords, ignoreCase)
+               {
+               }
+               
+               /// <summary> Construct a token stream filtering the given input.
+               /// If <code>stopWords</code> is an instance of {@link CharArraySet} (true if
+               /// <code>makeStopSet()</code> was used to construct the set) it will be directly used
+               /// and <code>ignoreCase</code> will be ignored since <code>CharArraySet</code>
+               /// directly controls case sensitivity.
+               /// <p/>
+               /// If <code>stopWords</code> is not an instance of {@link CharArraySet},
+               /// a new CharArraySet will be constructed and <code>ignoreCase</code> will be
+               /// used to specify the case sensitivity of that set.
+               /// 
+               /// </summary>
+               /// <param name="enablePositionIncrements">true if token positions should record the removed stop words
+               /// </param>
+               /// <param name="input">Input TokenStream
+               /// </param>
+               /// <param name="stopWords">The set of Stop Words.
+               /// </param>
+               /// <param name="ignoreCase">Ignore case when stopping.
+               /// </param>
+               public StopFilter(bool enablePositionIncrements, TokenStream input, System.Collections.Hashtable stopWords, bool ignoreCase):base(input)
+               {
+                       if (stopWords is CharArraySet)
+                       {
+                               this.stopWords = (CharArraySet) stopWords;
+                       }
+                       else
+                       {
+                               this.stopWords = new CharArraySet(stopWords.Count, ignoreCase);
+                               this.stopWords.Add(stopWords);
+                       }
+                       this.enablePositionIncrements = enablePositionIncrements;
+                       Init();
+               }
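+
+               // Editor's sketch (assumed name: "reader" is a System.IO.TextReader in
+               // scope). Passing the CharArraySet built by MakeStopSet avoids the
+               // re-wrapping branch above, and ignoreCase is then ignored as documented:
+               //
+               //   System.Collections.Hashtable stopSet = StopFilter.MakeStopSet(new System.String[]{"the", "an"}, true);
+               //   TokenStream ts = new StopFilter(true, new WhitespaceTokenizer(reader), stopSet, false);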
+               
+               /// <summary> Constructs a filter which removes words from the input
+               /// TokenStream that are named in the Set.
+               /// 
+               /// </summary>
+               /// <seealso cref="MakeStopSet(java.lang.String[])">
+               /// </seealso>
+               /// <deprecated> Use {@link #StopFilter(boolean, TokenStream, Set)} instead
+               /// </deprecated>
+        [Obsolete("Use StopFilter(bool, TokenStream, Hashtable) instead")]
+               public StopFilter(TokenStream in_Renamed, System.Collections.Hashtable stopWords):this(ENABLE_POSITION_INCREMENTS_DEFAULT, in_Renamed, stopWords, false)
+               {
+               }
+               
+               /// <summary> Constructs a filter which removes words from the input
+               /// TokenStream that are named in the Set.
+               /// 
+               /// </summary>
+               /// <param name="enablePositionIncrements">true if token positions should record the removed stop words
+               /// </param>
+               /// <param name="in">Input stream
+               /// </param>
+               /// <param name="stopWords">The set of Stop Words.
+               /// </param>
+               /// <seealso cref="MakeStopSet(java.lang.String[])">
+               /// </seealso>
+               public StopFilter(bool enablePositionIncrements, TokenStream in_Renamed, System.Collections.Hashtable stopWords):this(enablePositionIncrements, in_Renamed, stopWords, false)
+               {
+               }
+               
+               public void  Init()
+               {
+                       termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
+                       posIncrAtt = (PositionIncrementAttribute) AddAttribute(typeof(PositionIncrementAttribute));
+               }
+               
+               /// <summary> Builds a Set from an array of stop words,
+               /// appropriate for passing into the StopFilter constructor.
+               /// This permits this stopWords construction to be cached once when
+               /// an Analyzer is constructed.
+               /// 
+               /// </summary>
+               /// <seealso cref="MakeStopSet(java.lang.String[], boolean)"> passing false to ignoreCase
+               /// </seealso>
+               public static System.Collections.Hashtable MakeStopSet(System.String[] stopWords)
+               {
+                       return MakeStopSet(stopWords, false);
+               }
+               
+               /// <summary> Builds a Set from an array of stop words,
+               /// appropriate for passing into the StopFilter constructor.
+               /// This permits this stopWords construction to be cached once when
+               /// an Analyzer is constructed.
+               /// 
+               /// </summary>
+               /// <seealso cref="MakeStopSet(java.lang.String[], boolean)"> passing false to ignoreCase
+               /// </seealso>
+               public static System.Collections.Hashtable MakeStopSet(System.Collections.IList stopWords)
+               {
+                       return MakeStopSet(stopWords, false);
+               }
+               
+               /// <summary> Builds a Set from an array of stop words, optionally lower-casing them first.</summary>
+               /// <param name="stopWords">An array of stopwords
+               /// </param>
+               /// <param name="ignoreCase">If true, all words are lower cased first.  
+               /// </param>
+               /// <returns> a Set containing the words
+               /// </returns>
+               public static System.Collections.Hashtable MakeStopSet(System.String[] stopWords, bool ignoreCase)
+               {
+                       CharArraySet stopSet = new CharArraySet(stopWords.Length, ignoreCase);
+                       stopSet.AddAll(new System.Collections.ArrayList(stopWords));
+                       return stopSet;
+               }
+               
+               /// <summary> Builds a Set from a List of stop words, optionally lower-casing them first.</summary>
+               /// <param name="stopWords">A List of Strings representing the stopwords
+               /// </param>
+               /// <param name="ignoreCase">if true, all words are lower cased first
+               /// </param>
+               /// <returns> A Set containing the words
+               /// </returns>
+               public static System.Collections.Hashtable MakeStopSet(System.Collections.IList stopWords, bool ignoreCase)
+               {
+                       CharArraySet stopSet = new CharArraySet(stopWords.Count, ignoreCase);
+                       stopSet.AddAll(stopWords);
+                       return stopSet;
+               }
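+
+               // Hedged example (not in the original source): with ignoreCase=true the
+               // returned set is a case-insensitive CharArraySet.
+               //
+               //   System.Collections.Hashtable set = StopFilter.MakeStopSet(new System.String[]{"The"}, true);
+               //   bool stopped = ((CharArraySet) set).Contains("the".ToCharArray(), 0, 3); // true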
+               
+               /// <summary> Returns the next input Token whose term() is not a stop word.</summary>
+               public override bool IncrementToken()
+               {
+                       // return the first non-stop word found
+                       int skippedPositions = 0;
+                       while (input.IncrementToken())
+                       {
+                               if (!stopWords.Contains(termAtt.TermBuffer(), 0, termAtt.TermLength()))
+                               {
+                                       if (enablePositionIncrements)
+                                       {
+                                               posIncrAtt.SetPositionIncrement(posIncrAtt.GetPositionIncrement() + skippedPositions);
+                                       }
+                                       return true;
+                               }
+                               skippedPositions += posIncrAtt.GetPositionIncrement();
+                       }
+                       // reached EOS -- return false
+                       return false;
+               }
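+
+               // Worked illustration (editor's note, values assumed): for the input
+               // tokens "the"(posIncr 1), "quick"(1), "brown"(1) with "the" stopped and
+               // enablePositionIncrements=true, "quick" is emitted with posIncr 2
+               // (its own 1 plus the 1 skipped position), so phrase queries still see
+               // the gap left by the removed stop word.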
+               
+               /// <seealso cref="SetEnablePositionIncrementsDefault(bool)">
+               /// </seealso>
+               /// <deprecated> Please specify this when you create the StopFilter
+               /// </deprecated>
+        [Obsolete("Please specify this when you create the StopFilter")]
+               public static bool GetEnablePositionIncrementsDefault()
+               {
+                       return ENABLE_POSITION_INCREMENTS_DEFAULT;
+               }
+               
+               /// <summary> Returns version-dependent default for enablePositionIncrements. Analyzers
+               /// that embed StopFilter use this method when creating the StopFilter. Prior
+               /// to 2.9, this returns {@link #GetEnablePositionIncrementsDefault}. On 2.9
+               /// or later, it returns true.
+               /// </summary>
+               public static bool GetEnablePositionIncrementsVersionDefault(Version matchVersion)
+               {
+                       if (matchVersion.OnOrAfter(Version.LUCENE_29))
+                       {
+                               return true;
+                       }
+                       else
+                       {
+                               return ENABLE_POSITION_INCREMENTS_DEFAULT;
+                       }
+               }
+               
+               /// <summary> Set the default position increments behavior of every StopFilter created
+               /// from now on.
+               /// <p/>
+               /// Note: behavior of a single StopFilter instance can be modified with
+               /// {@link #SetEnablePositionIncrements(boolean)}. This static method allows
+               /// control over behavior of classes using StopFilters internally, for
+               /// example {@link Mono.Lucene.Net.Analysis.Standard.StandardAnalyzer
+               /// StandardAnalyzer} if used with the no-arg ctor.
+               /// <p/>
+               /// Default: false.
+               /// 
+               /// </summary>
+               /// <seealso cref="SetEnablePositionIncrements(bool)">
+               /// </seealso>
+               /// <deprecated> Please specify this when you create the StopFilter
+               /// </deprecated>
+        [Obsolete("Please specify this when you create the StopFilter")]
+               public static void  SetEnablePositionIncrementsDefault(bool defaultValue)
+               {
+                       ENABLE_POSITION_INCREMENTS_DEFAULT = defaultValue;
+               }
+               
+               /// <seealso cref="SetEnablePositionIncrements(bool)">
+               /// </seealso>
+               public bool GetEnablePositionIncrements()
+               {
+                       return enablePositionIncrements;
+               }
+               
+               /// <summary> If <code>true</code>, this StopFilter will preserve
+               /// positions of the incoming tokens (ie, accumulate and
+               /// set position increments of the removed stop tokens).
+               /// Generally, <code>true</code> is best as it does not
+               /// lose information (positions of the original tokens)
+               /// during indexing.
+               /// 
+               /// <p/> When enabled, if a token is stopped
+               /// (omitted), the position increment of the following
+               /// token is incremented.
+               /// 
+               /// <p/> <b>NOTE</b>: be sure to also
+               /// set {@link QueryParser#setEnablePositionIncrements} if
+               /// you use QueryParser to create queries.
+               /// </summary>
+               public void  SetEnablePositionIncrements(bool enable)
+               {
+                       this.enablePositionIncrements = enable;
+               }
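+
+               // Hedged usage note (illustrative): when stop positions are preserved at
+               // index time, enable the matching behavior on the query side too, e.g.:
+               //
+               //   QueryParser parser = ...; // construction elided
+               //   parser.SetEnablePositionIncrements(true);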
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/TeeSinkTokenFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/TeeSinkTokenFilter.cs
new file mode 100644 (file)
index 0000000..1fd8712
--- /dev/null
@@ -0,0 +1,259 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using AttributeImpl = Mono.Lucene.Net.Util.AttributeImpl;
+using AttributeSource = Mono.Lucene.Net.Util.AttributeSource;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> This TokenFilter provides the ability to set aside attribute states
+       /// that have already been analyzed.  This is useful in situations where multiple fields share
+       /// many common analysis steps and then go their separate ways.
+       /// <p/>
+       /// It is also useful for doing things like entity extraction or proper noun analysis as
+       /// part of the analysis workflow and saving off those tokens for use in another field.
+       /// 
+       /// <pre>
+       /// TeeSinkTokenFilter source1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(reader1));
+       /// TeeSinkTokenFilter.SinkTokenStream sink1 = source1.newSinkTokenStream();
+       /// TeeSinkTokenFilter.SinkTokenStream sink2 = source1.newSinkTokenStream();
+       /// TeeSinkTokenFilter source2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(reader2));
+       /// source2.addSinkTokenStream(sink1);
+       /// source2.addSinkTokenStream(sink2);
+       /// TokenStream final1 = new LowerCaseFilter(source1);
+       /// TokenStream final2 = source2;
+       /// TokenStream final3 = new EntityDetect(sink1);
+       /// TokenStream final4 = new URLDetect(sink2);
+       /// d.add(new Field("f1", final1));
+       /// d.add(new Field("f2", final2));
+       /// d.add(new Field("f3", final3));
+       /// d.add(new Field("f4", final4));
+       /// </pre>
+       /// In this example, <code>sink1</code> and <code>sink2</code> will both get tokens from both
+       /// <code>reader1</code> and <code>reader2</code> after whitespace tokenizer
+       /// and now we can further wrap any of these in extra analysis, and more "sources" can be inserted if desired.
+       /// It is important that tees are consumed before sinks (in the above example, the tee field names must
+       /// sort before the sink's field names). If you are not sure which stream is consumed first, you can simply
+       /// add another sink and then pass all tokens to the sinks at once using {@link #ConsumeAllTokens}.
+       /// This TokenFilter is exhausted after that. To do so, change the example above to:
+       /// <pre>
+       /// ...
+       /// TokenStream final1 = new LowerCaseFilter(source1.newSinkTokenStream());
+       /// TokenStream final2 = source2.newSinkTokenStream();
+       /// sink1.consumeAllTokens();
+       /// sink2.consumeAllTokens();
+       /// ...
+       /// </pre>
+       /// In this case, the fields can be added in any order, because the sources are not used anymore and all sinks are ready.
+       /// <p/>Note, the EntityDetect and URLDetect TokenStreams are for the example and do not currently exist in Lucene.
+       /// </summary>
+       public sealed class TeeSinkTokenFilter:TokenFilter
+       {
+               public class AnonymousClassSinkFilter:SinkFilter
+               {
+                       public override bool Accept(AttributeSource source)
+                       {
+                               return true;
+                       }
+               }
+               private System.Collections.IList sinks = new System.Collections.ArrayList();
+               
+               /// <summary> Instantiates a new TeeSinkTokenFilter.</summary>
+               public TeeSinkTokenFilter(TokenStream input):base(input)
+               {
+               }
+               
+               /// <summary> Returns a new {@link SinkTokenStream} that receives all tokens consumed by this stream.</summary>
+               public SinkTokenStream NewSinkTokenStream()
+               {
+                       return NewSinkTokenStream(ACCEPT_ALL_FILTER);
+               }
+               
+               /// <summary> Returns a new {@link SinkTokenStream} that receives all tokens consumed by this stream
+               /// that pass the supplied filter.
+               /// </summary>
+               /// <seealso cref="SinkFilter">
+               /// </seealso>
+               public SinkTokenStream NewSinkTokenStream(SinkFilter filter)
+               {
+                       SinkTokenStream sink = new SinkTokenStream(this.CloneAttributes(), filter);
+                       this.sinks.Add(new System.WeakReference(sink));
+                       return sink;
+               }
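+
+               // A hedged sketch of a custom SinkFilter (illustrative, not part of the
+               // port; TypeAttribute is assumed to come from the Tokenattributes
+               // namespace): accept only states whose TypeAttribute equals "word".
+               //
+               //   class TypeSinkFilter : TeeSinkTokenFilter.SinkFilter
+               //   {
+               //       public override bool Accept(AttributeSource source)
+               //       {
+               //           TypeAttribute typeAtt = (TypeAttribute) source.AddAttribute(typeof(TypeAttribute));
+               //           return typeAtt.Type() == "word";
+               //       }
+               //   }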
+               
+               /// <summary> Adds a {@link SinkTokenStream} created by another <code>TeeSinkTokenFilter</code>
+               /// to this one. The supplied stream will also receive all consumed tokens.
+               /// This method can be used to pass tokens from two different tees to one sink.
+               /// </summary>
+               public void  AddSinkTokenStream(SinkTokenStream sink)
+               {
+                       // check that sink has correct factory
+                       if (!this.GetAttributeFactory().Equals(sink.GetAttributeFactory()))
+                       {
+                               throw new System.ArgumentException("The supplied sink is not compatible to this tee");
+                       }
+                       // add eventually missing attribute impls to the existing sink
+            foreach (AttributeImpl impl in this.CloneAttributes().GetAttributeImplsIterator())
+            {
+                sink.AddAttributeImpl(impl);
+            }
+                       this.sinks.Add(new System.WeakReference(sink));
+               }
+               
+               /// <summary> <code>TeeSinkTokenFilter</code> passes all tokens to the added sinks
+               /// when it is consumed itself. To be sure that all tokens from the input
+               /// stream are passed to the sinks, you can call this method.
+               /// This instance is exhausted after that, but all sinks are instantly available.
+               /// </summary>
+               public void  ConsumeAllTokens()
+               {
+                       while (IncrementToken())
+                               ;
+               }
+               
+               public override bool IncrementToken()
+               {
+                       if (input.IncrementToken())
+                       {
+                               // capture state lazily - maybe no SinkFilter accepts this state
+                               AttributeSource.State state = null;
+                               for (System.Collections.IEnumerator it = sinks.GetEnumerator(); it.MoveNext(); )
+                               {
+                                       SinkTokenStream sink = (SinkTokenStream) ((System.WeakReference) it.Current).Target;
+                                       if (sink != null)
+                                       {
+                                               if (sink.Accept(this))
+                                               {
+                                                       if (state == null)
+                                                       {
+                                                               state = this.CaptureState();
+                                                       }
+                                                       sink.AddState(state);
+                                               }
+                                       }
+                               }
+                               return true;
+                       }
+                       
+                       return false;
+               }
+               
+               public override void  End()
+               {
+                       base.End();
+                       AttributeSource.State finalState = CaptureState();
+                       for (System.Collections.IEnumerator it = sinks.GetEnumerator(); it.MoveNext(); )
+                       {
+                               SinkTokenStream sink = (SinkTokenStream) ((System.WeakReference) it.Current).Target;
+                               if (sink != null)
+                               {
+                                       sink.SetFinalState(finalState);
+                               }
+                       }
+               }
+               
+               /// <summary> A filter that decides which {@link AttributeSource} states to store in the sink.</summary>
+               public abstract class SinkFilter
+               {
+                       /// <summary> Returns true, iff the current state of the passed-in {@link AttributeSource} shall be stored
+                       /// in the sink. 
+                       /// </summary>
+                       public abstract bool Accept(AttributeSource source);
+                       
+                       /// <summary> Called by {@link SinkTokenStream#Reset()}. This method does nothing by default
+                       /// and can optionally be overridden.
+                       /// </summary>
+                       public void  Reset()
+                       {
+                               // nothing to do; can be overridden
+                       }
+               }
+               
+               public sealed class SinkTokenStream:TokenStream
+               {
+                       private System.Collections.IList cachedStates = new System.Collections.ArrayList();
+                       private AttributeSource.State finalState;
+                       private System.Collections.IEnumerator it = null;
+                       private SinkFilter filter;
+                       
+                       internal SinkTokenStream(AttributeSource source, SinkFilter filter):base(source)
+                       {
+                               this.filter = filter;
+                       }
+                       
+                       internal /*private*/ bool Accept(AttributeSource source)
+                       {
+                               return filter.Accept(source);
+                       }
+                       
+                       internal /*private*/ void  AddState(AttributeSource.State state)
+                       {
+                               if (it != null)
+                               {
+                                       throw new System.SystemException("The tee must be consumed before sinks are consumed.");
+                               }
+                               cachedStates.Add(state);
+                       }
+                       
+                       internal /*private*/ void  SetFinalState(AttributeSource.State finalState)
+                       {
+                               this.finalState = finalState;
+                       }
+                       
+                       public override bool IncrementToken()
+                       {
+                               // lazy init the iterator
+                               if (it == null)
+                               {
+                                       it = cachedStates.GetEnumerator();
+                               }
+                               
+                               if (!it.MoveNext())
+                               {
+                                       return false;
+                               }
+                               
+                               AttributeSource.State state = (AttributeSource.State) it.Current;
+                               RestoreState(state);
+                               return true;
+                       }
+                       
+                       public override void  End()
+                       {
+                               if (finalState != null)
+                               {
+                                       RestoreState(finalState);
+                               }
+                       }
+                       
+                       public override void  Reset()
+                       {
+                               it = cachedStates.GetEnumerator();
+                       }
+               }
+               
+               private static readonly SinkFilter ACCEPT_ALL_FILTER;
+               static TeeSinkTokenFilter()
+               {
+                       ACCEPT_ALL_FILTER = new AnonymousClassSinkFilter();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/TeeTokenFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/TeeTokenFilter.cs
new file mode 100644 (file)
index 0000000..588c1bf
--- /dev/null
@@ -0,0 +1,83 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       
+       /// <summary> Works in conjunction with the SinkTokenizer to provide the ability to set aside tokens
+       /// that have already been analyzed.  This is useful in situations where multiple fields share
+       /// many common analysis steps and then go their separate ways.
+       /// <p/>
+       /// It is also useful for doing things like entity extraction or proper noun analysis as
+       /// part of the analysis workflow and saving off those tokens for use in another field.
+       /// 
+       /// <pre>
+       /// SinkTokenizer sink1 = new SinkTokenizer();
+       /// SinkTokenizer sink2 = new SinkTokenizer();
+       /// TokenStream source1 = new TeeTokenFilter(new TeeTokenFilter(new WhitespaceTokenizer(reader1), sink1), sink2);
+       /// TokenStream source2 = new TeeTokenFilter(new TeeTokenFilter(new WhitespaceTokenizer(reader2), sink1), sink2);
+       /// TokenStream final1 = new LowerCaseFilter(source1);
+       /// TokenStream final2 = source2;
+       /// TokenStream final3 = new EntityDetect(sink1);
+       /// TokenStream final4 = new URLDetect(sink2);
+       /// d.add(new Field("f1", final1));
+       /// d.add(new Field("f2", final2));
+       /// d.add(new Field("f3", final3));
+       /// d.add(new Field("f4", final4));
+       /// </pre>
+       /// In this example, <code>sink1</code> and <code>sink2</code> will both get tokens from both
+       /// <code>reader1</code> and <code>reader2</code> after whitespace tokenizer
+       /// and now we can further wrap any of these in extra analysis, and more "sources" can be inserted if desired.
+       /// It is important that tees are consumed before sinks (in the above example, the tee field names must
+       /// sort before the sink's field names).
+       /// Note, the EntityDetect and URLDetect TokenStreams are for the example and do not currently exist in Lucene
+       /// <p/>
+       /// 
+       /// See <a href="http://issues.apache.org/jira/browse/LUCENE-1058">LUCENE-1058</a>.
+       /// <p/>
+       /// WARNING: {@link TeeTokenFilter} and {@link SinkTokenizer} only work with the old TokenStream API.
+       /// If you switch to the new API, you need to use {@link TeeSinkTokenFilter} instead, which offers 
+       /// the same functionality.
+       /// </summary>
+       /// <seealso cref="SinkTokenizer">
+       /// </seealso>
+       /// <deprecated> Use {@link TeeSinkTokenFilter} instead
+       /// 
+       /// </deprecated>
+    [Obsolete("Use TeeSinkTokenFilter instead")]
+       public class TeeTokenFilter:TokenFilter
+       {
+               internal SinkTokenizer sink;
+               
+               public TeeTokenFilter(TokenStream input, SinkTokenizer sink):base(input)
+               {
+                       this.sink = sink;
+               }
+
+        [Obsolete("Mono.Lucene.Net-2.9.1. This method overrides obsolete member Mono.Lucene.Net.Analysis.TokenStream.Next(Mono.Lucene.Net.Analysis.Token)")]
+               public override Token Next(Token reusableToken)
+               {
+                       System.Diagnostics.Debug.Assert(reusableToken != null);
+                       Token nextToken = input.Next(reusableToken);
+                       sink.Add(nextToken);
+                       return nextToken;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Token.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Token.cs
new file mode 100644 (file)
index 0000000..dddafdf
--- /dev/null
@@ -0,0 +1,1018 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using FlagsAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.FlagsAttribute;
+using OffsetAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.OffsetAttribute;
+using PayloadAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.PayloadAttribute;
+using PositionIncrementAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.PositionIncrementAttribute;
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+using TypeAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TypeAttribute;
+using Payload = Mono.Lucene.Net.Index.Payload;
+using TermPositions = Mono.Lucene.Net.Index.TermPositions;
+using ArrayUtil = Mono.Lucene.Net.Util.ArrayUtil;
+using Attribute = Mono.Lucene.Net.Util.Attribute;
+using AttributeImpl = Mono.Lucene.Net.Util.AttributeImpl;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary>A Token is an occurrence of a term from the text of a field.  It consists of
+       /// a term's text, the start and end offset of the term in the text of the field,
+       /// and a type string.
+       /// <p/>
+       /// The start and end offsets permit applications to re-associate a token with
+       /// its source text, e.g., to display highlighted query terms in a document
+       /// browser, or to show matching text fragments in a <abbr
+       /// title="KeyWord In Context">KWIC</abbr> display, etc.
+       /// <p/>
+       /// The type is a string, assigned by a lexical analyzer
+       /// (a.k.a. tokenizer), naming the lexical or syntactic class that the token
+       /// belongs to.  For example an end of sentence marker token might be implemented
+       /// with type "eos".  The default token type is "word".  
+       /// <p/>
+       /// A Token can optionally have metadata (a.k.a. Payload) in the form of a variable
+       /// length byte array. Use {@link TermPositions#GetPayloadLength()} and 
+       /// {@link TermPositions#GetPayload(byte[], int)} to retrieve the payloads from the index.
+       /// </summary>
+       /// <summary><br/><br/>
+       /// </summary>
+       /// <summary><p/><b>NOTE:</b> As of 2.9, Token implements all {@link Attribute} interfaces
+       /// that are part of core Lucene and can be found in the {@code tokenattributes} subpackage.
+       /// Even though it is not necessary to use Token anymore, with the new TokenStream API it can
+       /// be used as convenience class that implements all {@link Attribute}s, which is especially useful
+       /// to easily switch from the old to the new TokenStream API.
+       /// </summary>
+       /// <summary><br/><br/>
+       /// <p/><b>NOTE:</b> As of 2.3, Token stores the term text
+       /// internally as a malleable char[] termBuffer instead of
+       /// String termText.  The indexing code and core tokenizers
+       /// have been changed to re-use a single Token instance, changing
+       /// its buffer and other fields in-place as the Token is
+       /// processed.  This provides substantially better indexing
+       /// performance as it saves the GC cost of new'ing a Token and
+       /// String for every term.  The APIs that accept String
+       /// termText are still available but a warning about the
+       /// associated performance cost has been added (below).  The
+       /// {@link #TermText()} method has been deprecated.<p/>
+       /// </summary>
+       /// <summary><p/>Tokenizers and TokenFilters should try to re-use a Token instance when
+       /// possible for best performance, by implementing the
+       /// {@link TokenStream#IncrementToken()} API.
+       /// Failing that, to create a new Token you should first use
+       /// one of the constructors that starts with null text.  To load
+       /// the token from a char[] use {@link #SetTermBuffer(char[], int, int)}.
+       /// To load from a String use {@link #SetTermBuffer(String)} or {@link #SetTermBuffer(String, int, int)}.
+       /// Alternatively you can get the Token's termBuffer by calling either {@link #TermBuffer()},
+       /// if you know that your text is shorter than the capacity of the termBuffer
+       /// or {@link #ResizeTermBuffer(int)}, if there is any possibility
+       /// that you may need to grow the buffer. Fill in the characters of your term into this
+       /// buffer, with {@link String#getChars(int, int, char[], int)} if loading from a string,
+       /// or with {@link System#arraycopy(Object, int, Object, int, int)}, and finally call {@link #SetTermLength(int)} to
+       /// set the length of the term text.  See <a target="_top"
+       /// href="https://issues.apache.org/jira/browse/LUCENE-969">LUCENE-969</a>
+       /// for details.<p/>
+       /// <p/>Typical Token reuse patterns:
+       /// <ul>
+       /// <li> Copying text from a string (type is reset to {@link #DEFAULT_TYPE} if not
+       /// specified):<br/>
+       /// <pre>
+       /// return reusableToken.reinit(string, startOffset, endOffset[, type]);
+       /// </pre>
+       /// </li>
+       /// <li> Copying some text from a string (type is reset to {@link #DEFAULT_TYPE}
+       /// if not specified):<br/>
+       /// <pre>
+       /// return reusableToken.reinit(string, 0, string.length(), startOffset, endOffset[, type]);
+       /// </pre>
+       /// </li>
+       /// <li> Copying text from char[] buffer (type is reset to {@link #DEFAULT_TYPE}
+       /// if not specified):<br/>
+       /// <pre>
+       /// return reusableToken.reinit(buffer, 0, buffer.length, startOffset, endOffset[, type]);
+       /// </pre>
+       /// </li>
+       /// <li> Copying some text from a char[] buffer (type is reset to
+       /// {@link #DEFAULT_TYPE} if not specified):<br/>
+       /// <pre>
+       /// return reusableToken.reinit(buffer, start, end - start, startOffset, endOffset[, type]);
+       /// </pre>
+       /// </li>
+       /// <li> Copying from one Token to another (type is reset to
+       /// {@link #DEFAULT_TYPE} if not specified):<br/>
+       /// <pre>
+       /// return reusableToken.reinit(source.termBuffer(), 0, source.termLength(), source.startOffset(), source.endOffset()[, source.type()]);
+       /// </pre>
+       /// </li>
+       /// </ul>
+       /// A few things to note:
+       /// <ul>
+       /// <li>clear() initializes all of the fields to default values. This was changed in contrast to Lucene 2.4, but should affect no one.</li>
+       /// <li>Because <code>TokenStreams</code> can be chained, one cannot assume that the <code>Token's</code> current type is correct.</li>
+       /// <li>The startOffset and endOffset represent the start and offset in the
+       /// source text, so be careful in adjusting them.</li>
+       /// <li>When caching a reusable token, clone it. When injecting a cached token into a stream that can be reset, clone it again.</li>
+       /// </ul>
+       /// <p/>
+       /// </summary>
+       /// <seealso cref="Mono.Lucene.Net.Index.Payload">
+       /// </seealso>
+       [Serializable]
+       public class Token:AttributeImpl, System.ICloneable, TermAttribute, TypeAttribute, PositionIncrementAttribute, FlagsAttribute, OffsetAttribute, PayloadAttribute
+       {
+               
+               public const System.String DEFAULT_TYPE = "word";
+               
+               private static int MIN_BUFFER_SIZE = 10;
+               
+               /// <deprecated> We will remove this when we remove the
+               /// deprecated APIs 
+               /// </deprecated>
+        [Obsolete("We will remove this when we remove the deprecated APIs")]
+               private System.String termText;
+               
+               /// <summary> Characters for the term text.</summary>
+               /// <deprecated> This will be made private. Instead, use:
+               /// {@link #TermBuffer()}, 
+               /// {@link #SetTermBuffer(char[], int, int)},
+               /// {@link #SetTermBuffer(String)}, or
+               /// {@link #SetTermBuffer(String, int, int)}
+               /// </deprecated>
+        [Obsolete("This will be made private. Instead, use: TermBuffer(), SetTermBuffer(char[], int, int), SetTermBuffer(String) or SetTermBuffer(String, int, int)")]
+               internal char[] termBuffer;
+               
+               /// <summary> Length of term text in the buffer.</summary>
+               /// <deprecated> This will be made private. Instead, use:
+               /// {@link #TermLength()}, or {@link #SetTermLength(int)}.
+               /// </deprecated>
+        [Obsolete("This will be made private. Instead, use: TermLength(), or SetTermLength(int)")]
+               internal int termLength;
+               
+               /// <summary> Start in source text.</summary>
+               /// <deprecated> This will be made private. Instead, use:
+               /// {@link #StartOffset()}, or {@link #SetStartOffset(int)}.
+               /// </deprecated>
+        [Obsolete("This will be made private. Instead, use: StartOffset(), or SetStartOffset(int).")]
+               internal int startOffset;
+               
+               /// <summary> End in source text.</summary>
+               /// <deprecated> This will be made private. Instead, use:
+               /// {@link #EndOffset()}, or {@link #SetEndOffset(int)}.
+               /// </deprecated>
+        [Obsolete("This will be made private. Instead, use: EndOffset(), or SetEndOffset(int).")]
+               internal int endOffset;
+               
+               /// <summary> The lexical type of the token.</summary>
+               /// <deprecated> This will be made private. Instead, use:
+               /// {@link #Type()}, or {@link #SetType(String)}.
+               /// </deprecated>
+        [Obsolete("This will be made private. Instead, use: Type(), or SetType(String).")]
+               internal System.String type = DEFAULT_TYPE;
+               
+               private int flags;
+               
+               /// <deprecated> This will be made private. Instead, use:
+               /// {@link #GetPayload()}, or {@link #SetPayload(Payload)}.
+               /// </deprecated>
+        [Obsolete("This will be made private. Instead, use: GetPayload(), or SetPayload(Payload).")]
+               internal Payload payload;
+               
+               /// <deprecated> This will be made private. Instead, use:
+               /// {@link #GetPositionIncrement()}, or {@link #SetPositionIncrement(int)}.
+               /// </deprecated>
+        [Obsolete("This will be made private. Instead, use: GetPositionIncrement(), or SetPositionIncrement(int).")]
+               internal int positionIncrement = 1;
+               
+               /// <summary>Constructs a Token with null text. </summary>
+               public Token()
+               {
+               }
+               
+               /// <summary>Constructs a Token with null text and start &amp; end
+               /// offsets.
+               /// </summary>
+               /// <param name="start">start offset in the source text
+               /// </param>
+               /// <param name="end">end offset in the source text 
+               /// </param>
+               public Token(int start, int end)
+               {
+                       startOffset = start;
+                       endOffset = end;
+               }
+               
+               /// <summary>Constructs a Token with null text and start &amp; end
+               /// offsets plus the Token type.
+               /// </summary>
+               /// <param name="start">start offset in the source text
+               /// </param>
+               /// <param name="end">end offset in the source text
+               /// </param>
+               /// <param name="typ">the lexical type of this Token 
+               /// </param>
+               public Token(int start, int end, System.String typ)
+               {
+                       startOffset = start;
+                       endOffset = end;
+                       type = typ;
+               }
+               
+               /// <summary> Constructs a Token with null text and start &amp; end
+               /// offsets plus flags. NOTE: flags is EXPERIMENTAL.
+               /// </summary>
+               /// <param name="start">start offset in the source text
+               /// </param>
+               /// <param name="end">end offset in the source text
+               /// </param>
+               /// <param name="flags">The bits to set for this token
+               /// </param>
+               public Token(int start, int end, int flags)
+               {
+                       startOffset = start;
+                       endOffset = end;
+                       this.flags = flags;
+               }
+               
+               /// <summary>Constructs a Token with the given term text, and start
+               /// &amp; end offsets.  The type defaults to "word".
+               /// <b>NOTE:</b> for better indexing speed you should
+               /// instead use the char[] termBuffer methods to set the
+               /// term text.
+               /// </summary>
+               /// <param name="text">term text
+               /// </param>
+               /// <param name="start">start offset
+               /// </param>
+               /// <param name="end">end offset
+               /// </param>
+               public Token(System.String text, int start, int end)
+               {
+                       termText = text;
+                       startOffset = start;
+                       endOffset = end;
+               }
+               
+               /// <summary>Constructs a Token with the given text, start and end
+               /// offsets, &amp; type.  <b>NOTE:</b> for better indexing
+               /// speed you should instead use the char[] termBuffer
+               /// methods to set the term text.
+               /// </summary>
+               /// <param name="text">term text
+               /// </param>
+               /// <param name="start">start offset
+               /// </param>
+               /// <param name="end">end offset
+               /// </param>
+               /// <param name="typ">token type
+               /// </param>
+               public Token(System.String text, int start, int end, System.String typ)
+               {
+                       termText = text;
+                       startOffset = start;
+                       endOffset = end;
+                       type = typ;
+               }
+               
+               /// <summary>  Constructs a Token with the given text, start and end
+               /// offsets, &amp; flags.  <b>NOTE:</b> for better indexing
+               /// speed you should instead use the char[] termBuffer
+               /// methods to set the term text.
+               /// </summary>
+               /// <param name="text">term text
+               /// </param>
+               /// <param name="start">start offset
+               /// </param>
+               /// <param name="end">end offset
+               /// </param>
+               /// <param name="flags">token type bits
+               /// </param>
+               public Token(System.String text, int start, int end, int flags)
+               {
+                       termText = text;
+                       startOffset = start;
+                       endOffset = end;
+                       this.flags = flags;
+               }
+               
+               /// <summary>  Constructs a Token with the given term buffer (offset
+               /// &amp; length), plus start and end offsets.
+               /// </summary>
+               /// <param name="startTermBuffer">buffer containing the term text
+               /// </param>
+               /// <param name="termBufferOffset">index of the first character of the term in the buffer
+               /// </param>
+               /// <param name="termBufferLength">number of characters in the term
+               /// </param>
+               /// <param name="start">start offset in the source text
+               /// </param>
+               /// <param name="end">end offset in the source text
+               /// </param>
+               public Token(char[] startTermBuffer, int termBufferOffset, int termBufferLength, int start, int end)
+               {
+                       SetTermBuffer(startTermBuffer, termBufferOffset, termBufferLength);
+                       startOffset = start;
+                       endOffset = end;
+               }
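+               
+               // Illustrative sketch (not part of the original source): two equivalent
+               // ways of building a token for the term "example" spanning source
+               // offsets 10-17. The char[]-based form is the one the NOTEs above
+               // recommend for indexing speed:
+               //
+               //   char[] buf = "example".ToCharArray();
+               //   Token byString = new Token("example", 10, 17);          // String-based term text
+               //   Token byBuffer = new Token(buf, 0, buf.Length, 10, 17); // char[]-based term text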
+               
+               /// <summary>Set the position increment.  This determines the position of this token
+               /// relative to the previous Token in a {@link TokenStream}, used in phrase
+               /// searching.
+               /// 
+               /// <p/>The default value is one.
+               /// 
+               /// <p/>Some common uses for this are:<ul>
+               /// 
+               /// <li>Set it to zero to put multiple terms in the same position.  This is
+               /// useful if, e.g., a word has multiple stems.  Searches for phrases
+               /// including either stem will match.  In this case, all but the first stem's
+               /// increment should be set to zero: the increment of the first instance
+               /// should be one.  Repeating a token with an increment of zero can also be
+               /// used to boost the scores of matches on that token.</li>
+               /// 
+               /// <li>Set it to values greater than one to inhibit exact phrase matches.
+               /// If, for example, one does not want phrases to match across removed stop
+               /// words, then one could build a stop word filter that removes stop words and
+               /// also sets the increment to the number of stop words removed before each
+               /// non-stop word.  Then exact phrase queries will only match when the terms
+               /// occur with no intervening stop words.</li>
+               /// 
+               /// </ul>
+               /// </summary>
+               /// <param name="positionIncrement">the distance from the prior term
+               /// </param>
+               /// <seealso cref="Mono.Lucene.Net.Index.TermPositions">
+               /// </seealso>
+               public virtual void  SetPositionIncrement(int positionIncrement)
+               {
+                       if (positionIncrement < 0)
+                               throw new System.ArgumentException("Increment must be zero or greater: " + positionIncrement);
+                       this.positionIncrement = positionIncrement;
+               }
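+               
+               // Illustrative sketch (not part of the original source): stacking a
+               // synonym on the same position as the original term, as described in
+               // the summary above. Phrase queries then match through either term:
+               //
+               //   Token original = new Token("run", 0, 3);   // position increment defaults to 1
+               //   Token synonym = new Token("sprint", 0, 3);
+               //   synonym.SetPositionIncrement(0);           // occupies the same position as "run"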
+               
+               /// <summary>Returns the position increment of this Token.</summary>
+               /// <seealso cref="setPositionIncrement">
+               /// </seealso>
+               public virtual int GetPositionIncrement()
+               {
+                       return positionIncrement;
+               }
+               
+               /// <summary>Sets the Token's term text.  <b>NOTE:</b> for better
+               /// indexing speed you should instead use the char[]
+               /// termBuffer methods to set the term text.
+               /// </summary>
+               /// <deprecated> use {@link #SetTermBuffer(char[], int, int)} or
+               /// {@link #SetTermBuffer(String)} or
+               /// {@link #SetTermBuffer(String, int, int)}.
+               /// </deprecated>
+        [Obsolete("Use SetTermBuffer(char[], int, int) or SetTermBuffer(String) or SetTermBuffer(String, int, int)")]
+               public virtual void  SetTermText(System.String text)
+               {
+                       termText = text;
+                       termBuffer = null;
+               }
+               
+               /// <summary>Returns the Token's term text.
+               /// 
+               /// </summary>
+               /// <deprecated> This method now has a performance penalty
+               /// because the text is stored internally in a char[].  If
+               /// possible, use {@link #TermBuffer()} and {@link
+               /// #TermLength()} directly instead.  If you really need a
+               /// String, use {@link #Term()}
+               /// </deprecated>
+        [Obsolete("This method now has a performance penalty because the text is stored internally in a char[]. If possible, use TermBuffer() and TermLength() directly instead. If you really need a String, use Term().")]
+               public System.String TermText()
+               {
+                       if (termText == null && termBuffer != null)
+                               termText = new System.String(termBuffer, 0, termLength);
+                       return termText;
+               }
+               
+               /// <summary>Returns the Token's term text.
+               /// 
+               /// This method has a performance penalty
+               /// because the text is stored internally in a char[].  If
+               /// possible, use {@link #TermBuffer()} and {@link
+               /// #TermLength()} directly instead.  If you really need a
+               /// String, use this method, which is nothing more than
+               /// a convenience call to <b>new String(token.TermBuffer(), 0, token.TermLength())</b>.
+               /// </summary>
+               public System.String Term()
+               {
+                       if (termText != null)
+                               return termText;
+                       InitTermBuffer();
+                       return new System.String(termBuffer, 0, termLength);
+               }
+               
+               /// <summary>Copies the contents of buffer, starting at offset for
+               /// length characters, into the termBuffer array.
+               /// </summary>
+               /// <param name="buffer">the buffer to copy
+               /// </param>
+               /// <param name="offset">the index in the buffer of the first character to copy
+               /// </param>
+               /// <param name="length">the number of characters to copy
+               /// </param>
+               public void  SetTermBuffer(char[] buffer, int offset, int length)
+               {
+                       termText = null;
+                       GrowTermBuffer(length);
+                       Array.Copy(buffer, offset, termBuffer, 0, length);
+                       termLength = length;
+               }
+               
+               /// <summary>Copies the contents of buffer into the termBuffer array.</summary>
+               /// <param name="buffer">the buffer to copy
+               /// </param>
+               public void  SetTermBuffer(System.String buffer)
+               {
+                       termText = null;
+                       int length = buffer.Length;
+                       GrowTermBuffer(length);
+                       SupportClass.TextSupport.GetCharsFromString(buffer, 0, length, termBuffer, 0);
+                       termLength = length;
+               }
+               
+               /// <summary>Copies the contents of buffer, starting at offset and continuing
+               /// for length characters, into the termBuffer array.
+               /// </summary>
+               /// <param name="buffer">the buffer to copy
+               /// </param>
+               /// <param name="offset">the index in the buffer of the first character to copy
+               /// </param>
+               /// <param name="length">the number of characters to copy
+               /// </param>
+               public void  SetTermBuffer(System.String buffer, int offset, int length)
+               {
+                       System.Diagnostics.Debug.Assert(offset <= buffer.Length);
+                       System.Diagnostics.Debug.Assert(offset + length <= buffer.Length);
+                       termText = null;
+                       GrowTermBuffer(length);
+                       SupportClass.TextSupport.GetCharsFromString(buffer, offset, offset + length, termBuffer, 0);
+                       termLength = length;
+               }
+               
+               /// <summary>Returns the internal termBuffer character array which
+               /// you can then directly alter.  If the array is too
+               /// small for your token, use {@link
+               /// #ResizeTermBuffer(int)} to increase it.  After
+               /// altering the buffer be sure to call {@link
+               /// #SetTermLength(int)} to record the number of valid
+               /// characters that were placed into the termBuffer. 
+               /// </summary>
+               public char[] TermBuffer()
+               {
+                       InitTermBuffer();
+                       return termBuffer;
+               }
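+               
+               // Illustrative sketch (not part of the original source): the
+               // resize/alter/record pattern described in the summary above, writing
+               // the term "hello" directly into the buffer of a token:
+               //
+               //   char[] buf = token.ResizeTermBuffer(5);  // ensure capacity for 5 chars
+               //   "hello".CopyTo(0, buf, 0, 5);            // alter the buffer in place
+               //   token.SetTermLength(5);                  // record the valid length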
+               
+               /// <summary>Grows the termBuffer to at least size newSize, preserving the
+               /// existing content. Note: If the next operation is to change
+               /// the contents of the term buffer use
+               /// {@link #SetTermBuffer(char[], int, int)},
+               /// {@link #SetTermBuffer(String)}, or
+               /// {@link #SetTermBuffer(String, int, int)}
+               /// to optimally combine the resize with the setting of the termBuffer.
+               /// </summary>
+               /// <param name="newSize">minimum size of the new termBuffer
+               /// </param>
+               /// <returns> newly created termBuffer with length >= newSize
+               /// </returns>
+               public virtual char[] ResizeTermBuffer(int newSize)
+               {
+                       if (termBuffer == null)
+                       {
+                               // The buffer is always at least MIN_BUFFER_SIZE
+                               newSize = newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize;
+                               //Preserve termText 
+                               if (termText != null)
+                               {
+                                       int ttLen = termText.Length;
+                                       newSize = newSize < ttLen?ttLen:newSize;
+                                       termBuffer = new char[ArrayUtil.GetNextSize(newSize)];
+                                       SupportClass.TextSupport.GetCharsFromString(termText, 0, termText.Length, termBuffer, 0);
+                                       termText = null;
+                               }
+                               else
+                               {
+                                       // no term Text, the first allocation
+                                       termBuffer = new char[ArrayUtil.GetNextSize(newSize)];
+                               }
+                       }
+                       else
+                       {
+                               if (termBuffer.Length < newSize)
+                               {
+                                       // Not big enough; create a new array with slight
+                                       // over allocation and preserve content
+                                       char[] newCharBuffer = new char[ArrayUtil.GetNextSize(newSize)];
+                                       Array.Copy(termBuffer, 0, newCharBuffer, 0, termBuffer.Length);
+                                       termBuffer = newCharBuffer;
+                               }
+                       }
+                       return termBuffer;
+               }
+               
+               /// <summary>Allocates a buffer char[] of at least newSize, without preserving the existing content.
+               /// It is always used in places that set the content.
+               /// </summary>
+               /// <param name="newSize">minimum size of the buffer
+               /// </param>
+               private void  GrowTermBuffer(int newSize)
+               {
+                       if (termBuffer == null)
+                       {
+                               // The buffer is always at least MIN_BUFFER_SIZE    
+                               termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize)];
+                       }
+                       else
+                       {
+                               if (termBuffer.Length < newSize)
+                               {
+                                       // Not big enough; create a new array with slight
+                                       // over allocation:
+                                       termBuffer = new char[ArrayUtil.GetNextSize(newSize)];
+                               }
+                       }
+               }
+               
+               
+               // TODO: once we remove the deprecated termText() method
+               // and switch entirely to char[] termBuffer we don't need
+               // to use this method anymore, only for late init of the buffer
+               private void  InitTermBuffer()
+               {
+                       if (termBuffer == null)
+                       {
+                               if (termText == null)
+                               {
+                                       termBuffer = new char[ArrayUtil.GetNextSize(MIN_BUFFER_SIZE)];
+                                       termLength = 0;
+                               }
+                               else
+                               {
+                                       int length = termText.Length;
+                                       if (length < MIN_BUFFER_SIZE)
+                                               length = MIN_BUFFER_SIZE;
+                                       termBuffer = new char[ArrayUtil.GetNextSize(length)];
+                                       termLength = termText.Length;
+                                       SupportClass.TextSupport.GetCharsFromString(termText, 0, termText.Length, termBuffer, 0);
+                                       termText = null;
+                               }
+                       }
+                       else
+                       {
+                               termText = null;
+                       }
+               }
+               
+               /// <summary>Return number of valid characters (length of the term)
+               /// in the termBuffer array. 
+               /// </summary>
+               public int TermLength()
+               {
+                       InitTermBuffer();
+                       return termLength;
+               }
+               
+               /// <summary>Set number of valid characters (length of the term) in
+               /// the termBuffer array. Use this to truncate the termBuffer
+               /// or to synchronize with external manipulation of the termBuffer.
+               /// Note: to grow the size of the array,
+               /// use {@link #ResizeTermBuffer(int)} first.
+               /// </summary>
+               /// <param name="length">the truncated length
+               /// </param>
+               public void  SetTermLength(int length)
+               {
+                       InitTermBuffer();
+                       if (length > termBuffer.Length)
+                               throw new System.ArgumentException("length " + length + " exceeds the size of the termBuffer (" + termBuffer.Length + ")");
+                       termLength = length;
+               }
+               
+               /// <summary>Returns this Token's starting offset, the position of the first character
+               /// corresponding to this token in the source text.
+               /// Note that the difference between EndOffset() and StartOffset() may not be
+               /// equal to TermLength(), as the term text may have been altered by a
+               /// stemmer or some other filter. 
+               /// </summary>
+               public int StartOffset()
+               {
+                       return startOffset;
+               }
+               
+               /// <summary>Set the starting offset.</summary>
+               /// <seealso cref="StartOffset()">
+               /// </seealso>
+               public virtual void  SetStartOffset(int offset)
+               {
+                       this.startOffset = offset;
+               }
+               
+               /// <summary>Returns this Token's ending offset, one greater than the position of the
+               /// last character corresponding to this token in the source text. The length
+               /// of the token in the source text is (endOffset - startOffset). 
+               /// </summary>
+               public int EndOffset()
+               {
+                       return endOffset;
+               }
+               
+               /// <summary>Set the ending offset.</summary>
+               /// <seealso cref="EndOffset()">
+               /// </seealso>
+               public virtual void  SetEndOffset(int offset)
+               {
+                       this.endOffset = offset;
+               }
+               
+               /// <summary>Set the starting and ending offset.
+               /// See StartOffset() and EndOffset()
+               /// </summary>
+               public virtual void  SetOffset(int startOffset, int endOffset)
+               {
+                       this.startOffset = startOffset;
+                       this.endOffset = endOffset;
+               }
+               
+               /// <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
+               public System.String Type()
+               {
+                       return type;
+               }
+               
+               /// <summary>Set the lexical type.</summary>
+               /// <seealso cref="Type()">
+               /// </seealso>
+               public void  SetType(System.String type)
+               {
+                       this.type = type;
+               }
+               
+               /// <summary> EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
+               /// <p/>
+               /// 
+               /// Get the bitset for any bits that have been set.  This is completely distinct from {@link #Type()}, although they do share similar purposes.
+               /// The flags can be used to encode information about the token for use by other {@link Mono.Lucene.Net.Analysis.TokenFilter}s.
+               /// 
+               /// 
+               /// </summary>
+               /// <returns> The bits
+               /// </returns>
+               public virtual int GetFlags()
+               {
+                       return flags;
+               }
+               
+               /// <seealso cref="GetFlags()">
+               /// </seealso>
+               public virtual void  SetFlags(int flags)
+               {
+                       this.flags = flags;
+               }
+               
+               /// <summary> Returns this Token's payload.</summary>
+               public virtual Payload GetPayload()
+               {
+                       return this.payload;
+               }
+               
+               /// <summary> Sets this Token's payload.</summary>
+               public virtual void  SetPayload(Payload payload)
+               {
+                       this.payload = payload;
+               }
+               
+               public override System.String ToString()
+               {
+                       System.Text.StringBuilder sb = new System.Text.StringBuilder();
+                       sb.Append('(');
+                       InitTermBuffer();
+                       if (termBuffer == null)
+                               sb.Append("null");
+                       else
+                               sb.Append(termBuffer, 0, termLength);
+                       sb.Append(',').Append(startOffset).Append(',').Append(endOffset);
+                       if (!type.Equals("word"))
+                               sb.Append(",type=").Append(type);
+                       if (positionIncrement != 1)
+                               sb.Append(",posIncr=").Append(positionIncrement);
+                       sb.Append(')');
+                       return sb.ToString();
+               }
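+               
+               // Illustrative note (not part of the original source): for a token
+               // with term "run", offsets 4-7 and default type and increment,
+               // ToString() yields "(run,4,7)"; non-default values are appended,
+               // e.g. "(run,4,7,type=stem,posIncr=0)".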
+               
+               /// <summary>Resets the term text, payload, flags, positionIncrement,
+               /// startOffset, endOffset and token type to their defaults.
+               /// </summary>
+               public override void  Clear()
+               {
+                       payload = null;
+                       // Leave termBuffer to allow re-use
+                       termLength = 0;
+                       termText = null;
+                       positionIncrement = 1;
+                       flags = 0;
+                       startOffset = endOffset = 0;
+                       type = DEFAULT_TYPE;
+               }
+               
+               public override System.Object Clone()
+               {
+                       Token t = (Token) base.Clone();
+                       // Do a deep clone
+                       if (termBuffer != null)
+                       {
+                               t.termBuffer = new char[termBuffer.Length];
+                               termBuffer.CopyTo(t.termBuffer, 0);
+                       }
+                       if (payload != null)
+                       {
+                               t.payload = (Payload) payload.Clone();
+                       }
+                       return t;
+               }
+               
+               /// <summary>Makes a clone, but replaces the term buffer &amp;
+               /// start/end offset in the process.  This is more
+               /// efficient than doing a full clone (and then calling
+               /// SetTermBuffer) because it saves a wasted copy of the old
+               /// termBuffer. 
+               /// </summary>
+               public virtual Token Clone(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset)
+               {
+                       Token t = new Token(newTermBuffer, newTermOffset, newTermLength, newStartOffset, newEndOffset);
+                       t.positionIncrement = positionIncrement;
+                       t.flags = flags;
+                       t.type = type;
+                       if (payload != null)
+                               t.payload = (Payload) payload.Clone();
+                       return t;
+               }
+               
+               public  override bool Equals(System.Object obj)
+               {
+                       if (obj == this)
+                               return true;
+                       
+                       if (obj is Token)
+                       {
+                               Token other = (Token) obj;
+                               
+                               InitTermBuffer();
+                               other.InitTermBuffer();
+                               
+                               if (termLength == other.termLength && startOffset == other.startOffset && endOffset == other.endOffset && flags == other.flags && positionIncrement == other.positionIncrement && SubEqual(type, other.type) && SubEqual(payload, other.payload))
+                               {
+                                       for (int i = 0; i < termLength; i++)
+                                               if (termBuffer[i] != other.termBuffer[i])
+                                                       return false;
+                                       return true;
+                               }
+                               else
+                                       return false;
+                       }
+                       else
+                               return false;
+               }
+               
+               private bool SubEqual(System.Object o1, System.Object o2)
+               {
+                       if (o1 == null)
+                               return o2 == null;
+                       else
+                               return o1.Equals(o2);
+               }
+               
+               public override int GetHashCode()
+               {
+                       InitTermBuffer();
+                       int code = termLength;
+                       code = code * 31 + startOffset;
+                       code = code * 31 + endOffset;
+                       code = code * 31 + flags;
+                       code = code * 31 + positionIncrement;
+                       code = code * 31 + type.GetHashCode();
+                       code = (payload == null?code:code * 31 + payload.GetHashCode());
+                       code = code * 31 + ArrayUtil.HashCode(termBuffer, 0, termLength);
+                       return code;
+               }
+               
+               // like clear() but doesn't clear termBuffer/text
+               private void  ClearNoTermBuffer()
+               {
+                       payload = null;
+                       positionIncrement = 1;
+                       flags = 0;
+                       startOffset = endOffset = 0;
+                       type = DEFAULT_TYPE;
+               }
+               
+               /// <summary>Shorthand for calling {@link #Clear},
+               /// {@link #SetTermBuffer(char[], int, int)},
+               /// {@link #SetStartOffset},
+               /// {@link #SetEndOffset},
+               /// {@link #SetType}
+               /// </summary>
+               /// <returns> this Token instance 
+               /// </returns>
+               public virtual Token Reinit(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset, System.String newType)
+               {
+                       ClearNoTermBuffer();
+                       payload = null;
+                       positionIncrement = 1;
+                       SetTermBuffer(newTermBuffer, newTermOffset, newTermLength);
+                       startOffset = newStartOffset;
+                       endOffset = newEndOffset;
+                       type = newType;
+                       return this;
+               }
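+               
+               // Illustrative sketch (not part of the original source): the Reinit
+               // overloads exist so producers can recycle a caller-supplied token
+               // instead of allocating a new one, e.g. in a hypothetical
+               // next(Token)-style producer:
+               //
+               //   return reusableToken.Reinit(termChars, 0, termChars.Length,
+               //                               termStart, termEnd, DEFAULT_TYPE);
+               //
+               // where termChars, termStart and termEnd are locals of the producer.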
+               
+               /// <summary>Shorthand for calling {@link #Clear},
+               /// {@link #SetTermBuffer(char[], int, int)},
+               /// {@link #SetStartOffset},
+               /// {@link #SetEndOffset}, and
+               /// {@link #SetType} with Token.DEFAULT_TYPE
+               /// </summary>
+               /// <returns> this Token instance 
+               /// </returns>
+               public virtual Token Reinit(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset)
+               {
+                       ClearNoTermBuffer();
+                       SetTermBuffer(newTermBuffer, newTermOffset, newTermLength);
+                       startOffset = newStartOffset;
+                       endOffset = newEndOffset;
+                       type = DEFAULT_TYPE;
+                       return this;
+               }
+               
+               /// <summary>Shorthand for calling {@link #Clear},
+               /// {@link #SetTermBuffer(String)},
+               /// {@link #SetStartOffset},
+               /// {@link #SetEndOffset}, and
+               /// {@link #SetType}
+               /// </summary>
+               /// <returns> this Token instance 
+               /// </returns>
+               public virtual Token Reinit(System.String newTerm, int newStartOffset, int newEndOffset, System.String newType)
+               {
+                       ClearNoTermBuffer();
+                       SetTermBuffer(newTerm);
+                       startOffset = newStartOffset;
+                       endOffset = newEndOffset;
+                       type = newType;
+                       return this;
+               }
+               
+               /// <summary>Shorthand for calling {@link #Clear},
+               /// {@link #SetTermBuffer(String, int, int)},
+               /// {@link #SetStartOffset},
+               /// {@link #SetEndOffset}, and
+               /// {@link #SetType}
+               /// </summary>
+               /// <returns> this Token instance 
+               /// </returns>
+               public virtual Token Reinit(System.String newTerm, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset, System.String newType)
+               {
+                       ClearNoTermBuffer();
+                       SetTermBuffer(newTerm, newTermOffset, newTermLength);
+                       startOffset = newStartOffset;
+                       endOffset = newEndOffset;
+                       type = newType;
+                       return this;
+               }
+               
+               /// <summary>Shorthand for calling {@link #Clear},
+               /// {@link #SetTermBuffer(String)},
+               /// {@link #SetStartOffset},
+               /// {@link #SetEndOffset}, and
+               /// {@link #SetType} with Token.DEFAULT_TYPE
+               /// </summary>
+               /// <returns> this Token instance 
+               /// </returns>
+               public virtual Token Reinit(System.String newTerm, int newStartOffset, int newEndOffset)
+               {
+                       ClearNoTermBuffer();
+                       SetTermBuffer(newTerm);
+                       startOffset = newStartOffset;
+                       endOffset = newEndOffset;
+                       type = DEFAULT_TYPE;
+                       return this;
+               }
+               
+               /// <summary>Shorthand for calling {@link #Clear},
+               /// {@link #SetTermBuffer(String, int, int)},
+               /// {@link #SetStartOffset},
+               /// {@link #SetEndOffset}, and
+               /// {@link #SetType} with Token.DEFAULT_TYPE
+               /// </summary>
+               /// <returns> this Token instance 
+               /// </returns>
+               public virtual Token Reinit(System.String newTerm, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset)
+               {
+                       ClearNoTermBuffer();
+                       SetTermBuffer(newTerm, newTermOffset, newTermLength);
+                       startOffset = newStartOffset;
+                       endOffset = newEndOffset;
+                       type = DEFAULT_TYPE;
+                       return this;
+               }
+               
+               /// <summary> Copy the prototype token's fields into this one. Note: Payloads are shared.</summary>
+               /// <param name="prototype">
+               /// </param>
+               public virtual void  Reinit(Token prototype)
+               {
+                       prototype.InitTermBuffer();
+                       SetTermBuffer(prototype.termBuffer, 0, prototype.termLength);
+                       positionIncrement = prototype.positionIncrement;
+                       flags = prototype.flags;
+                       startOffset = prototype.startOffset;
+                       endOffset = prototype.endOffset;
+                       type = prototype.type;
+                       payload = prototype.payload;
+               }
+               
+               /// <summary> Copy the prototype token's fields into this one, with a different term. Note: Payloads are shared.</summary>
+               /// <param name="prototype">
+               /// </param>
+               /// <param name="newTerm">
+               /// </param>
+               public virtual void  Reinit(Token prototype, System.String newTerm)
+               {
+                       SetTermBuffer(newTerm);
+                       positionIncrement = prototype.positionIncrement;
+                       flags = prototype.flags;
+                       startOffset = prototype.startOffset;
+                       endOffset = prototype.endOffset;
+                       type = prototype.type;
+                       payload = prototype.payload;
+               }
+               
+               /// <summary> Copy the prototype token's fields into this one, with a different term. Note: Payloads are shared.</summary>
+               /// <param name="prototype">
+               /// </param>
+               /// <param name="newTermBuffer">
+               /// </param>
+               /// <param name="offset">
+               /// </param>
+               /// <param name="length">
+               /// </param>
+               public virtual void  Reinit(Token prototype, char[] newTermBuffer, int offset, int length)
+               {
+                       SetTermBuffer(newTermBuffer, offset, length);
+                       positionIncrement = prototype.positionIncrement;
+                       flags = prototype.flags;
+                       startOffset = prototype.startOffset;
+                       endOffset = prototype.endOffset;
+                       type = prototype.type;
+                       payload = prototype.payload;
+               }
+               
+               public override void  CopyTo(AttributeImpl target)
+               {
+                       if (target is Token)
+                       {
+                               Token to = (Token) target;
+                               to.Reinit(this);
+                               // reinit shares the payload, so clone it:
+                               if (payload != null)
+                               {
+                                       to.payload = (Payload) payload.Clone();
+                               }
+                               // remove the following optimization in 3.0 when old TokenStream API removed:
+                       }
+                       else if (target is TokenWrapper)
+                       {
+                               ((TokenWrapper) target).delegate_Renamed = (Token) this.Clone();
+                       }
+                       else
+                       {
+                               InitTermBuffer();
+                               ((TermAttribute) target).SetTermBuffer(termBuffer, 0, termLength);
+                               ((OffsetAttribute) target).SetOffset(startOffset, endOffset);
+                               ((PositionIncrementAttribute) target).SetPositionIncrement(positionIncrement);
+                               ((PayloadAttribute) target).SetPayload((payload == null)?null:(Payload) payload.Clone());
+                               ((FlagsAttribute) target).SetFlags(flags);
+                               ((TypeAttribute) target).SetType(type);
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/TokenFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/TokenFilter.cs
new file mode 100644 (file)
index 0000000..661d726
--- /dev/null
@@ -0,0 +1,62 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> A TokenFilter is a TokenStream whose input is another TokenStream.
+       /// <p/>
+       /// This is an abstract class; subclasses must override {@link #IncrementToken()}.
+       /// 
+       /// </summary>
+       /// <seealso cref="TokenStream">
+       /// </seealso>
+       public abstract class TokenFilter:TokenStream
+       {
+               /// <summary>The source of tokens for this filter. </summary>
+               protected internal TokenStream input;
+               
+               /// <summary>Construct a token stream filtering the given input. </summary>
+               protected internal TokenFilter(TokenStream input):base(input)
+               {
+                       this.input = input;
+               }
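+               
+               // Illustrative sketch (not part of the original source): a minimal
+               // filter built on this class with the attribute-based API. The name
+               // LowerCaseishFilter is hypothetical; TermAttribute is assumed from
+               // the Tokenattributes namespace:
+               //
+               //   public sealed class LowerCaseishFilter : TokenFilter
+               //   {
+               //       private readonly TermAttribute termAtt;
+               //
+               //       public LowerCaseishFilter(TokenStream input) : base(input)
+               //       {
+               //           termAtt = (TermAttribute) AddAttribute(typeof(TermAttribute));
+               //       }
+               //
+               //       public override bool IncrementToken()
+               //       {
+               //           if (!input.IncrementToken())
+               //               return false;
+               //           char[] buf = termAtt.TermBuffer();
+               //           for (int i = 0; i < termAtt.TermLength(); i++)
+               //               buf[i] = System.Char.ToLower(buf[i]);
+               //           return true;
+               //       }
+               //   }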
+               
+               /// <summary>Performs end-of-stream operations, if any, and then calls <code>End()</code> on the
+               /// input TokenStream.<p/> 
+               /// <b>NOTE:</b> Be sure to call <code>base.End()</code> first when overriding this method.
+               /// </summary>
+               public override void  End()
+               {
+                       input.End();
+               }
+               
+               /// <summary>Close the input TokenStream. </summary>
+               public override void  Close()
+               {
+                       input.Close();
+               }
+               
+               /// <summary>Reset the filter as well as the input TokenStream. </summary>
+               public override void  Reset()
+               {
+                       input.Reset();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/TokenStream.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/TokenStream.cs
new file mode 100644 (file)
index 0000000..d8003e4
--- /dev/null
@@ -0,0 +1,505 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using FlagsAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.FlagsAttribute;
+using OffsetAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.OffsetAttribute;
+using PayloadAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.PayloadAttribute;
+using PositionIncrementAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.PositionIncrementAttribute;
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+using TypeAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TypeAttribute;
+using Document = Mono.Lucene.Net.Documents.Document;
+using Field = Mono.Lucene.Net.Documents.Field;
+using IndexWriter = Mono.Lucene.Net.Index.IndexWriter;
+using Attribute = Mono.Lucene.Net.Util.Attribute;
+using AttributeImpl = Mono.Lucene.Net.Util.AttributeImpl;
+using AttributeSource = Mono.Lucene.Net.Util.AttributeSource;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> A <code>TokenStream</code> enumerates the sequence of tokens, either from
+       /// {@link Field}s of a {@link Document} or from query text.
+       /// <p/>
+       /// This is an abstract class. Concrete subclasses are:
+       /// <ul>
+       /// <li>{@link Tokenizer}, a <code>TokenStream</code> whose input is a Reader; and</li>
+       /// <li>{@link TokenFilter}, a <code>TokenStream</code> whose input is another
+       /// <code>TokenStream</code>.</li>
+       /// </ul>
+       /// A new <code>TokenStream</code> API has been introduced with Lucene 2.9. This API
+       /// has moved from being {@link Token} based to {@link Attribute} based. While
+       /// {@link Token} still exists in 2.9 as a convenience class, the preferred way
+       /// to store the information of a {@link Token} is to use {@link AttributeImpl}s.
+       /// <p/>
+       /// <code>TokenStream</code> now extends {@link AttributeSource}, which provides
+       /// access to all of the token {@link Attribute}s for the <code>TokenStream</code>.
+       /// Note that only one instance per {@link AttributeImpl} is created and reused
+       /// for every token. This approach reduces object creation and allows local
+       /// caching of references to the {@link AttributeImpl}s. See
+       /// {@link #IncrementToken()} for further details.
+       /// <p/>
+       /// <b>The workflow of the new <code>TokenStream</code> API is as follows:</b>
+       /// <ol>
+       /// <li>Instantiation of <code>TokenStream</code>/{@link TokenFilter}s which add/get
+       /// attributes to/from the {@link AttributeSource}.</li>
+       /// <li>The consumer calls {@link TokenStream#Reset()}.</li>
+       /// <li>The consumer retrieves attributes from the stream and stores local
+       /// references to all attributes it wants to access</li>
+       /// <li>The consumer calls {@link #IncrementToken()} until it returns false and
+       /// consumes the attributes after each call.</li>
+       /// <li>The consumer calls {@link #End()} so that any end-of-stream operations
+       /// can be performed.</li>
+       /// <li>The consumer calls {@link #Close()} to release any resource when finished
+       /// using the <code>TokenStream</code></li>
+       /// </ol>
+       /// To make sure that filters and consumers know which attributes are available,
+       /// the attributes must be added during instantiation. Filters and consumers are
+       /// not required to check for availability of attributes in
+       /// {@link #IncrementToken()}.
+       /// <p/>
+       /// You can find some example code for the new API in the analysis package level
+       /// Javadoc.
+       /// <p/>
+       /// Sometimes it is desirable to capture the current state of a <code>TokenStream</code>,
+       /// e.g. for buffering purposes (see {@link CachingTokenFilter},
+       /// {@link TeeSinkTokenFilter}). For this use case
+       /// {@link AttributeSource#CaptureState} and {@link AttributeSource#RestoreState}
+       /// can be used.
+       /// </summary>
+       public abstract class TokenStream:AttributeSource
+       {
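+               // Illustrative sketch (not part of the original source): the consumer
+               // workflow from the summary above, written against a hypothetical
+               // `stream` instance. TermAttribute.Term() is assumed from the
+               // Tokenattributes namespace; AddAttribute covers steps 1 and 3:
+               //
+               //   TermAttribute termAtt = (TermAttribute) stream.AddAttribute(typeof(TermAttribute));
+               //   stream.Reset();                   // step 2: reset the stream
+               //   while (stream.IncrementToken())   // step 4: advance token by token
+               //   {
+               //       System.Console.WriteLine(termAtt.Term());
+               //   }
+               //   stream.End();                     // step 5: end-of-stream operations
+               //   stream.Close();                   // step 6: release resources
+               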
+               private void  InitBlock()
+               {
+                       supportedMethods = GetSupportedMethods(this.GetType());
+               }
+               
+               /// <deprecated> Remove this when old API is removed! 
+               /// </deprecated>
+        [Obsolete("Remove this when old API is removed! ")]
+               private static readonly AttributeFactory DEFAULT_TOKEN_WRAPPER_ATTRIBUTE_FACTORY = new TokenWrapperAttributeFactory(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY);
+               
+               /// <deprecated> Remove this when old API is removed! 
+               /// </deprecated>
+        [Obsolete("Remove this when old API is removed! ")]
+               private TokenWrapper tokenWrapper;
+               
+               /// <deprecated> Remove this when old API is removed! 
+               /// </deprecated>
+        [Obsolete("Remove this when old API is removed! ")]
+               private static bool onlyUseNewAPI = false;
+               
+               /// <deprecated> Remove this when old API is removed! 
+               /// </deprecated>
+        [Obsolete("Remove this when old API is removed! ")]
+               private MethodSupport supportedMethods;
+               
+               /// <deprecated> Remove this when old API is removed! 
+               /// </deprecated>
+        [Obsolete("Remove this when old API is removed! ")]
+               private sealed class MethodSupport
+               {
+                       internal bool hasIncrementToken;
+                       internal bool hasReusableNext;
+                       internal bool hasNext;
+                       
+                       internal MethodSupport(System.Type clazz)
+                       {
+                               hasIncrementToken = IsMethodOverridden(clazz, "IncrementToken", METHOD_NO_PARAMS);
+                               hasReusableNext = IsMethodOverridden(clazz, "Next", METHOD_TOKEN_PARAM);
+                               hasNext = IsMethodOverridden(clazz, "Next", METHOD_NO_PARAMS);
+                       }
+                       
+                       private static bool IsMethodOverridden(System.Type clazz, System.String name, System.Type[] params_Renamed)
+                       {
+                               try
+                               {
+                                       return clazz.GetMethod(name, params_Renamed).DeclaringType != typeof(TokenStream);
+                               }
+                               catch (System.MethodAccessException e)
+                               {
+                                       // should not happen
+                                       throw new System.SystemException(e.Message, e);
+                               }
+                       }
+                       
+                       private static readonly System.Type[] METHOD_NO_PARAMS = new System.Type[0];
+                       private static readonly System.Type[] METHOD_TOKEN_PARAM = new System.Type[]{typeof(Token)};
+               }
+               
+               /// <deprecated> Remove this when old API is removed! 
+               /// </deprecated>
+        [Obsolete("Remove this when old API is removed! ")]
+               private static readonly System.Collections.Hashtable knownMethodSupport = new System.Collections.Hashtable();
+
+        // {{Aroush-2.9 Port issue, need to mimic java's IdentityHashMap
+        /*
+         * From Java docs:
+         * This class implements the Map interface with a hash table, using 
+         * reference-equality in place of object-equality when comparing keys 
+         * (and values). In other words, in an IdentityHashMap, two keys k1 and k2 
+         * are considered equal if and only if (k1==k2). (In normal Map 
+         * implementations (like HashMap) two keys k1 and k2 are considered 
+         * equal if and only if (k1==null ? k2==null : k1.equals(k2)).) 
+         */
+        // Aroush-2.9}}
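+
+        // Illustrative sketch (not part of the original source): in .NET,
+        // reference equality can be approximated with a custom comparer, e.g.:
+        //
+        //   sealed class IdentityComparer : System.Collections.IEqualityComparer
+        //   {
+        //       bool System.Collections.IEqualityComparer.Equals(object x, object y)
+        //       {
+        //           return object.ReferenceEquals(x, y);
+        //       }
+        //       int System.Collections.IEqualityComparer.GetHashCode(object o)
+        //       {
+        //           return System.Runtime.CompilerServices.RuntimeHelpers.GetHashCode(o);
+        //       }
+        //   }
+        //
+        // Since the keys here are System.Type instances, which are canonical per
+        // type, the plain Hashtable above already compares them by identity in
+        // practice.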
+
+               /// <deprecated> Remove this when old API is removed! 
+               /// </deprecated>
+        [Obsolete("Remove this when old API is removed! ")]
+               private static MethodSupport GetSupportedMethods(System.Type clazz)
+               {
+                       MethodSupport supportedMethods;
+                       lock (knownMethodSupport)
+                       {
+                               supportedMethods = (MethodSupport) knownMethodSupport[clazz];
+                               if (supportedMethods == null)
+                               {
+                                       knownMethodSupport.Add(clazz, supportedMethods = new MethodSupport(clazz));
+                               }
+                       }
+                       return supportedMethods;
+               }
+               
+               /// <deprecated> Remove this when old API is removed! 
+               /// </deprecated>
+        [Obsolete("Remove this when old API is removed! ")]
+               private sealed class TokenWrapperAttributeFactory:AttributeFactory
+               {
+                       private AttributeFactory delegate_Renamed;
+                       
+                       internal TokenWrapperAttributeFactory(AttributeFactory delegate_Renamed)
+                       {
+                               this.delegate_Renamed = delegate_Renamed;
+                       }
+                       
+                       public override AttributeImpl CreateAttributeInstance(System.Type attClass)
+                       {
+                               return attClass.IsAssignableFrom(typeof(TokenWrapper))?new TokenWrapper():delegate_Renamed.CreateAttributeInstance(attClass);
+                       }
+                       
+                       // this is needed for TeeSinkTokenStream's check for compatibility of AttributeSource,
+                       // so two TokenStreams using old API have the same AttributeFactory wrapped by this one.
+                       public  override bool Equals(System.Object other)
+                       {
+                               if (this == other)
+                                       return true;
+                               if (other is TokenWrapperAttributeFactory)
+                               {
+                                       TokenWrapperAttributeFactory af = (TokenWrapperAttributeFactory) other;
+                                       return this.delegate_Renamed.Equals(af.delegate_Renamed);
+                               }
+                               return false;
+                       }
+                       
+                       public override int GetHashCode()
+                       {
+                               return delegate_Renamed.GetHashCode() ^ 0x0a45ff31;
+                       }
+               }
+               
+               /// <summary> A TokenStream using the default attribute factory.</summary>
+               protected internal TokenStream():base(onlyUseNewAPI?AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY:TokenStream.DEFAULT_TOKEN_WRAPPER_ATTRIBUTE_FACTORY)
+               {
+                       InitBlock();
+                       tokenWrapper = InitTokenWrapper(null);
+                       Check();
+               }
+               
+               /// <summary> A TokenStream that uses the same attributes as the supplied one.</summary>
+               protected internal TokenStream(AttributeSource input):base(input)
+               {
+                       InitBlock();
+                       tokenWrapper = InitTokenWrapper(input);
+                       Check();
+               }
+               
+               /// <summary> A TokenStream using the supplied AttributeFactory for creating new {@link Attribute} instances.</summary>
+               protected internal TokenStream(AttributeFactory factory):base(onlyUseNewAPI?factory:new TokenWrapperAttributeFactory(factory))
+               {
+                       InitBlock();
+                       tokenWrapper = InitTokenWrapper(null);
+                       Check();
+               }
+               
+               /// <deprecated> Remove this when old API is removed! 
+               /// </deprecated>
+        [Obsolete("Remove this when old API is removed! ")]
+               private TokenWrapper InitTokenWrapper(AttributeSource input)
+               {
+                       if (onlyUseNewAPI)
+                       {
+                               // no wrapper needed
+                               return null;
+                       }
+                       else
+                       {
+                               // if possible get the wrapper from the filter's input stream
+                               if (input is TokenStream && ((TokenStream) input).tokenWrapper != null)
+                               {
+                                       return ((TokenStream) input).tokenWrapper;
+                               }
+                               // check that all attributes are implemented by the same TokenWrapper instance
+                               Attribute att = AddAttribute(typeof(TermAttribute));
+                               if (att is TokenWrapper && AddAttribute(typeof(TypeAttribute)) == att && AddAttribute(typeof(PositionIncrementAttribute)) == att && AddAttribute(typeof(FlagsAttribute)) == att && AddAttribute(typeof(OffsetAttribute)) == att && AddAttribute(typeof(PayloadAttribute)) == att)
+                               {
+                                       return (TokenWrapper) att;
+                               }
+                               else
+                               {
+                                       throw new System.NotSupportedException("If onlyUseNewAPI is disabled, all basic Attributes must be implemented by the internal class " + "TokenWrapper. Please make sure that all TokenStreams/TokenFilters in this chain have been " + "instantiated with this flag disabled and do not add any custom instances for the basic Attributes!");
+                               }
+                       }
+               }
+               
+               /// <deprecated> Remove this when old API is removed! 
+               /// </deprecated>
+        [Obsolete("Remove this when old API is removed! ")]
+               private void  Check()
+               {
+                       if (onlyUseNewAPI && !supportedMethods.hasIncrementToken)
+                       {
+                               throw new System.NotSupportedException(GetType().FullName + " does not implement IncrementToken(), which is needed for onlyUseNewAPI.");
+                       }
+                       
+                       // a TokenStream subclass must at least implement one of the methods!
+                       if (!(supportedMethods.hasIncrementToken || supportedMethods.hasNext || supportedMethods.hasReusableNext))
+                       {
+                               throw new System.NotSupportedException(GetType().FullName + " does not implement any of IncrementToken(), Next(Token), or Next().");
+                       }
+               }
+               
+               /// <summary> For extra performance you can globally enable the new
+               /// {@link #IncrementToken} API using {@link Attribute}s. There will be a
+               /// small, but in most cases negligible, performance increase by enabling this,
+               /// but it only works if <b>all</b> <code>TokenStream</code>s use the new API and
+               /// implement {@link #IncrementToken}. This setting can only be enabled
+               /// globally.
+               /// <p/>
+               /// This setting only affects <code>TokenStream</code>s instantiated after this
+               /// call. All <code>TokenStream</code>s already created use the other setting.
+               /// <p/>
+               /// All core {@link Analyzer}s are compatible with this setting; if you have
+               /// your own <code>TokenStream</code>s that are also compatible, you should enable
+               /// this.
+               /// <p/>
+               /// When enabled, tokenization may throw {@link UnsupportedOperationException}s
+               /// if the whole tokenizer chain is not compatible, e.g. if one of the
+               /// <code>TokenStream</code>s does not implement the new <code>TokenStream</code> API.
+               /// <p/>
+               /// The default is <code>false</code>, so the fallback to the old API remains
+               /// available.
+               /// 
+               /// </summary>
+               /// <deprecated> This setting will no longer be needed in Lucene 3.0 as the old
+               /// API will be removed.
+               /// </deprecated>
+        [Obsolete("This setting will no longer be needed in Lucene 3.0 as the old API will be removed.")]
+               public static void  SetOnlyUseNewAPI(bool onlyUseNewAPI)
+               {
+                       TokenStream.onlyUseNewAPI = onlyUseNewAPI;
+               }
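+               
+               // A minimal usage sketch (hypothetical caller code, not part of this file):
+               // opt in to the attribute-based API once at startup, before any streams exist.
+               // "analyzer" and "reader" are assumed to be a configured Analyzer and TextReader.
+               //
+               //     TokenStream.SetOnlyUseNewAPI(true);   // affects streams created after this call
+               //     TokenStream stream = analyzer.TokenStream("field", reader);
+               //     while (stream.IncrementToken()) { /* consume attributes */ }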
+               
+               /// <summary> Returns whether only the new API is used.
+               /// 
+               /// </summary>
+               /// <seealso cref="SetOnlyUseNewAPI">
+               /// </seealso>
+               /// <deprecated> This setting will no longer be needed in Lucene 3.0 as
+               /// the old API will be removed.
+               /// </deprecated>
+        [Obsolete("This setting will no longer be needed in Lucene 3.0 as the old API will be removed.")]
+               public static bool GetOnlyUseNewAPI()
+               {
+                       return onlyUseNewAPI;
+               }
+               
+               /// <summary> Consumers (e.g., {@link IndexWriter}) use this method to advance the stream to
+               /// the next token. Implementing classes must implement this method and update
+               /// the appropriate {@link AttributeImpl}s with the attributes of the next
+               /// token.
+               /// 
+               /// The producer must make no assumptions about the attributes after the
+               /// method has returned: the caller may change them arbitrarily. If the
+               /// producer needs to preserve the state for subsequent calls, it can use
+               /// {@link #captureState} to create a copy of the current attribute state.
+               /// 
+               /// This method is called for every token of a document, so an efficient
+               /// implementation is crucial for good performance. To avoid calls to
+               /// {@link #AddAttribute(Class)} and {@link #GetAttribute(Class)} or downcasts,
+               /// references to all {@link AttributeImpl}s that this stream uses should be
+               /// retrieved during instantiation.
+               /// 
+               /// To ensure that filters and consumers know which attributes are available,
+               /// the attributes must be added during instantiation. Filters and consumers
+               /// are not required to check for availability of attributes in
+               /// {@link #IncrementToken()}.
+               /// 
+               /// </summary>
+               /// <returns> false for end of stream; true otherwise
+               /// </returns>
+               /// <remarks> Note that this method will be defined abstract in Lucene
+               /// 3.0.
+               /// </remarks>
+               public virtual bool IncrementToken()
+               {
+                       System.Diagnostics.Debug.Assert(tokenWrapper != null);
+                       
+                       Token token;
+                       if (supportedMethods.hasReusableNext)
+                       {
+                               token = Next(tokenWrapper.delegate_Renamed);
+                       }
+                       else
+                       {
+                               System.Diagnostics.Debug.Assert(supportedMethods.hasNext);
+                               token = Next();
+                       }
+                       if (token == null)
+                               return false;
+                       tokenWrapper.delegate_Renamed = token;
+                       return true;
+               }
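+               
+               // Hypothetical consumer sketch for the contract above: attribute references are
+               // looked up once, up front, and IncrementToken() then drives a tight loop.
+               // "stream" and "Consume" are assumed names; the AddAttribute(Type) lookup is the
+               // same pattern used by InitTokenWrapper() above.
+               //
+               //     TermAttribute term = (TermAttribute) stream.AddAttribute(typeof(TermAttribute));
+               //     OffsetAttribute offsets = (OffsetAttribute) stream.AddAttribute(typeof(OffsetAttribute));
+               //     while (stream.IncrementToken())
+               //         Consume(term.Term(), offsets.StartOffset(), offsets.EndOffset());
+               //     stream.End();
+               //     stream.Close();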
+               
+               /// <summary> This method is called by the consumer after the last token has been
+               /// consumed, after {@link #IncrementToken()} returned <code>false</code>
+               /// (using the new <code>TokenStream</code> API). Streams implementing the old API
+               /// should upgrade to use this feature.
+               /// <p/>
+               /// This method can be used to perform any end-of-stream operations, such as
+               /// setting the final offset of a stream. The final offset of a stream might
+               /// differ from the offset of the last token, e.g. when one or more whitespace
+               /// characters followed the last token and a {@link WhitespaceTokenizer} was used.
+               /// 
+               /// </summary>
+               /// <throws>  IOException </throws>
+               public virtual void  End()
+               {
+                       // do nothing by default
+               }
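+               
+               // Sketch of a typical override (assumed fields "offsetAtt" and "finalOffset" in
+               // a hypothetical tokenizer), setting the final offset once the stream is exhausted:
+               //
+               //     public override void End()
+               //     {
+               //         offsetAtt.SetOffset(finalOffset, finalOffset);
+               //     }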
+               
+               /// <summary> Returns the next token in the stream, or null at EOS. When possible, the
+               /// input Token should be used as the returned Token (this gives fastest
+               /// tokenization performance), but this is not required and a new Token may be
+               /// returned. Callers may re-use a single Token instance for successive calls
+               /// to this method.
+               /// 
+               /// This implicitly defines a "contract" between consumers (callers of this
+               /// method) and producers (implementations of this method that are the source
+               /// for tokens):
+               /// <ul>
+               /// <li>A consumer must fully consume the previously returned {@link Token}
+               /// before calling this method again.</li>
+               /// <li>A producer must call {@link Token#Clear()} before setting the fields in
+               /// it and returning it</li>
+               /// </ul>
+               /// Also, the producer must make no assumptions about a {@link Token} after it
+               /// has been returned: the caller may arbitrarily change it. If the producer
+               /// needs to hold onto the {@link Token} for subsequent calls, it must clone()
+               /// it before storing it. Note that a {@link TokenFilter} is considered a
+               /// consumer.
+               /// 
+               /// </summary>
+               /// <param name="reusableToken">a {@link Token} that may or may not be used as the returned Token;
+               /// this parameter should never be null (the callee is not required to
+               /// check for null before using it, but it is a good idea to assert that
+               /// it is not null).
+               /// </param>
+               /// <returns> next {@link Token} in the stream or null if end-of-stream was hit
+               /// </returns>
+               /// <deprecated> The new {@link #IncrementToken()} and {@link AttributeSource}
+               /// APIs should be used instead.
+               /// </deprecated>
+        [Obsolete("The new IncrementToken() and AttributeSource APIs should be used instead.")]
+               public virtual Token Next(Token reusableToken)
+               {
+                       System.Diagnostics.Debug.Assert(reusableToken != null);
+                       
+                       if (tokenWrapper == null)
+                               throw new System.NotSupportedException("This TokenStream only supports the new Attributes API.");
+                       
+                       if (supportedMethods.hasIncrementToken)
+                       {
+                               tokenWrapper.delegate_Renamed = reusableToken;
+                               return IncrementToken()?tokenWrapper.delegate_Renamed:null;
+                       }
+                       else
+                       {
+                               System.Diagnostics.Debug.Assert(supportedMethods.hasNext);
+                               return Next();
+                       }
+               }
+               
+               /// <summary> Returns the next {@link Token} in the stream, or null at EOS.
+               /// 
+               /// </summary>
+               /// <deprecated> The returned Token is a "full private copy" (not re-used across
+               /// calls to {@link #Next()}) but will be slower than calling
+               /// {@link #Next(Token)} or using the new {@link #IncrementToken()}
+               /// method with the new {@link AttributeSource} API.
+               /// </deprecated>
+        [Obsolete("The returned Token is a \"full private copy\" (not re-used across calls to Next()) but will be slower than calling Next(Token) or using the new IncrementToken() method with the new AttributeSource API.")]
+               public virtual Token Next()
+               {
+                       if (tokenWrapper == null)
+                               throw new System.NotSupportedException("This TokenStream only supports the new Attributes API.");
+                       
+                       Token nextToken;
+                       if (supportedMethods.hasIncrementToken)
+                       {
+                               Token savedDelegate = tokenWrapper.delegate_Renamed;
+                               tokenWrapper.delegate_Renamed = new Token();
+                               nextToken = IncrementToken()?tokenWrapper.delegate_Renamed:null;
+                               tokenWrapper.delegate_Renamed = savedDelegate;
+                       }
+                       else
+                       {
+                               System.Diagnostics.Debug.Assert(supportedMethods.hasReusableNext);
+                               nextToken = Next(new Token());
+                       }
+                       
+                       if (nextToken != null)
+                       {
+                               Mono.Lucene.Net.Index.Payload p = nextToken.GetPayload();
+                               if (p != null)
+                               {
+                                       nextToken.SetPayload((Mono.Lucene.Net.Index.Payload) p.Clone());
+                               }
+                       }
+                       return nextToken;
+               }
+               
+               /// <summary> Resets this stream to the beginning. This is an optional operation, so
+               /// subclasses may or may not implement this method. {@link #Reset()} is not needed for
+               /// the standard indexing process. However, if the tokens of a
+               /// <code>TokenStream</code> are intended to be consumed more than once, it is
+               /// necessary to implement {@link #Reset()}. Note that if your TokenStream
+               /// caches tokens and feeds them back again after a reset, it is imperative
+               /// that you clone the tokens when you store them away (on the first pass) as
+               /// well as when you return them (on future passes after {@link #Reset()}).
+               /// </summary>
+               public virtual void  Reset()
+               {
+               }
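+               
+               // Hedged usage sketch: consuming a stream twice is only valid when the subclass
+               // implements Reset(), e.g. a CachingTokenFilter; "stream" is assumed resettable.
+               //
+               //     while (stream.IncrementToken()) { /* first pass */ }
+               //     stream.Reset();
+               //     while (stream.IncrementToken()) { /* second pass over the same tokens */ }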
+               
+               /// <summary>Releases resources associated with this stream. </summary>
+               public virtual void  Close()
+               {
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/TokenWrapper.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/TokenWrapper.cs
new file mode 100644 (file)
index 0000000..8de8d4d
--- /dev/null
@@ -0,0 +1,201 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using FlagsAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.FlagsAttribute;
+using OffsetAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.OffsetAttribute;
+using PayloadAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.PayloadAttribute;
+using PositionIncrementAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.PositionIncrementAttribute;
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+using TypeAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TypeAttribute;
+using Payload = Mono.Lucene.Net.Index.Payload;
+using AttributeImpl = Mono.Lucene.Net.Util.AttributeImpl;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> This class wraps a Token and supplies a single attribute instance
+       /// where the delegate token can be replaced.
+       /// </summary>
+       /// <deprecated> Will be removed when the old TokenStream API is removed.
+       /// </deprecated>
+    [Obsolete("Will be removed when the old TokenStream API is removed.")]
+       [Serializable]
+       public sealed class TokenWrapper:AttributeImpl, System.ICloneable, TermAttribute, TypeAttribute, PositionIncrementAttribute, FlagsAttribute, OffsetAttribute, PayloadAttribute
+       {
+               
+               internal Token delegate_Renamed;
+               
+               internal TokenWrapper():this(new Token())
+               {
+               }
+               
+               internal TokenWrapper(Token delegate_Renamed)
+               {
+                       this.delegate_Renamed = delegate_Renamed;
+               }
+               
+               // TermAttribute:
+               
+               public System.String Term()
+               {
+                       return delegate_Renamed.Term();
+               }
+               
+               public void  SetTermBuffer(char[] buffer, int offset, int length)
+               {
+                       delegate_Renamed.SetTermBuffer(buffer, offset, length);
+               }
+               
+               public void  SetTermBuffer(System.String buffer)
+               {
+                       delegate_Renamed.SetTermBuffer(buffer);
+               }
+               
+               public void  SetTermBuffer(System.String buffer, int offset, int length)
+               {
+                       delegate_Renamed.SetTermBuffer(buffer, offset, length);
+               }
+               
+               public char[] TermBuffer()
+               {
+                       return delegate_Renamed.TermBuffer();
+               }
+               
+               public char[] ResizeTermBuffer(int newSize)
+               {
+                       return delegate_Renamed.ResizeTermBuffer(newSize);
+               }
+               
+               public int TermLength()
+               {
+                       return delegate_Renamed.TermLength();
+               }
+               
+               public void  SetTermLength(int length)
+               {
+                       delegate_Renamed.SetTermLength(length);
+               }
+               
+               // TypeAttribute:
+               
+               public System.String Type()
+               {
+                       return delegate_Renamed.Type();
+               }
+               
+               public void  SetType(System.String type)
+               {
+                       delegate_Renamed.SetType(type);
+               }
+               
+               public void  SetPositionIncrement(int positionIncrement)
+               {
+                       delegate_Renamed.SetPositionIncrement(positionIncrement);
+               }
+               
+               public int GetPositionIncrement()
+               {
+                       return delegate_Renamed.GetPositionIncrement();
+               }
+               
+               // FlagsAttribute
+               
+               public int GetFlags()
+               {
+                       return delegate_Renamed.GetFlags();
+               }
+               
+               public void  SetFlags(int flags)
+               {
+                       delegate_Renamed.SetFlags(flags);
+               }
+               
+               // OffsetAttribute
+               
+               public int StartOffset()
+               {
+                       return delegate_Renamed.StartOffset();
+               }
+               
+               public void  SetOffset(int startOffset, int endOffset)
+               {
+                       delegate_Renamed.SetOffset(startOffset, endOffset);
+               }
+               
+               public int EndOffset()
+               {
+                       return delegate_Renamed.EndOffset();
+               }
+               
+               // PayloadAttribute
+               
+               public Payload GetPayload()
+               {
+                       return delegate_Renamed.GetPayload();
+               }
+               
+               public void  SetPayload(Payload payload)
+               {
+                       delegate_Renamed.SetPayload(payload);
+               }
+               
+               // AttributeImpl
+               
+               public override void  Clear()
+               {
+                       delegate_Renamed.Clear();
+               }
+               
+               public override System.String ToString()
+               {
+                       return delegate_Renamed.ToString();
+               }
+               
+               public override int GetHashCode()
+               {
+                       return delegate_Renamed.GetHashCode();
+               }
+               
+               public  override bool Equals(System.Object other)
+               {
+                       if (other is TokenWrapper)
+                       {
+                               return ((TokenWrapper) other).delegate_Renamed.Equals(this.delegate_Renamed);
+                       }
+                       return false;
+               }
+               
+               public override System.Object Clone()
+               {
+                       return new TokenWrapper((Token) delegate_Renamed.Clone());
+               }
+               
+               public override void  CopyTo(AttributeImpl target)
+               {
+                       if (target is TokenWrapper)
+                       {
+                               ((TokenWrapper) target).delegate_Renamed = (Token) this.delegate_Renamed.Clone();
+                       }
+                       else
+                       {
+                               this.delegate_Renamed.CopyTo(target);
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/FlagsAttribute.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/FlagsAttribute.cs
new file mode 100644 (file)
index 0000000..c2f1e00
--- /dev/null
@@ -0,0 +1,47 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Tokenizer = Mono.Lucene.Net.Analysis.Tokenizer;
+using Attribute = Mono.Lucene.Net.Util.Attribute;
+
+namespace Mono.Lucene.Net.Analysis.Tokenattributes
+{
+       
+       /// <summary> This attribute can be used to pass different flags down the {@link Tokenizer} chain,
+       /// e.g. from one TokenFilter to another.
+       /// </summary>
+       public interface FlagsAttribute:Attribute
+       {
+               /// <summary> EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
+               /// <p/>
+               /// 
+               /// Get the bitset for any bits that have been set.  This is completely distinct from {@link TypeAttribute#Type()}, although they do share similar purposes.
+               /// The flags can be used to encode information about the token for use by other {@link Mono.Lucene.Net.Analysis.TokenFilter}s.
+               /// 
+               /// 
+               /// </summary>
+               /// <returns> The bits
+               /// </returns>
+               int GetFlags();
+               
+               /// <seealso cref="GetFlags()">
+               /// </seealso>
+               void  SetFlags(int flags);
+       }
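+       
+       // Illustrative sketch (hypothetical filter code, not part of this file): a
+       // TokenFilter can tag tokens for a downstream filter by setting a bit.
+       // "KEYWORD_BIT" is an assumed constant defined by the filter author.
+       //
+       //     FlagsAttribute flagsAtt = (FlagsAttribute) AddAttribute(typeof(FlagsAttribute));
+       //     flagsAtt.SetFlags(flagsAtt.GetFlags() | KEYWORD_BIT);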
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/FlagsAttributeImpl.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/FlagsAttributeImpl.cs
new file mode 100644 (file)
index 0000000..4322d06
--- /dev/null
@@ -0,0 +1,93 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using AttributeImpl = Mono.Lucene.Net.Util.AttributeImpl;
+
+namespace Mono.Lucene.Net.Analysis.Tokenattributes
+{
+       
+       /// <summary> This attribute can be used to pass different flags down the tokenizer chain,
+       /// e.g. from one TokenFilter to another.
+       /// </summary>
+       [Serializable]
+       public class FlagsAttributeImpl:AttributeImpl, FlagsAttribute, System.ICloneable
+       {
+               private int flags = 0;
+               
+               /// <summary> EXPERIMENTAL:  While we think this is here to stay, we may want to change it to be a long.
+               /// <p/>
+               /// 
+               /// Get the bitset for any bits that have been set.  This is completely distinct from {@link TypeAttribute#Type()}, although they do share similar purposes.
+               /// The flags can be used to encode information about the token for use by other {@link Mono.Lucene.Net.Analysis.TokenFilter}s.
+               /// 
+               /// 
+               /// </summary>
+               /// <returns> The bits
+               /// </returns>
+               public virtual int GetFlags()
+               {
+                       return flags;
+               }
+               
+               /// <seealso cref="GetFlags()">
+               /// </seealso>
+               public virtual void  SetFlags(int flags)
+               {
+                       this.flags = flags;
+               }
+               
+               public override void  Clear()
+               {
+                       flags = 0;
+               }
+               
+               public  override bool Equals(System.Object other)
+               {
+                       if (this == other)
+                       {
+                               return true;
+                       }
+                       
+                       if (other is FlagsAttributeImpl)
+                       {
+                               return ((FlagsAttributeImpl) other).flags == flags;
+                       }
+                       
+                       return false;
+               }
+               
+               public override int GetHashCode()
+               {
+                       return flags;
+               }
+               
+               public override void  CopyTo(AttributeImpl target)
+               {
+                       FlagsAttribute t = (FlagsAttribute) target;
+                       t.SetFlags(flags);
+               }
+               
+               override public System.Object Clone()
+               {
+            FlagsAttributeImpl impl = new FlagsAttributeImpl();
+            impl.flags = this.flags;
+            return impl;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/OffsetAttribute.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/OffsetAttribute.cs
new file mode 100644 (file)
index 0000000..84b1805
--- /dev/null
@@ -0,0 +1,49 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Attribute = Mono.Lucene.Net.Util.Attribute;
+
+namespace Mono.Lucene.Net.Analysis.Tokenattributes
+{
+       
+       /// <summary> The start and end character offset of a Token. </summary>
+       public interface OffsetAttribute:Attribute
+       {
+               /// <summary>Returns this Token's starting offset, the position of the first character
+               /// corresponding to this token in the source text.
+               /// Note that the difference between endOffset() and startOffset() may not be
+               /// equal to termText.length(), as the term text may have been altered by a
+               /// stemmer or some other filter. 
+               /// </summary>
+               int StartOffset();
+               
+               
+               /// <summary>Set the starting and ending offset.
+        /// See StartOffset() and EndOffset()
+        /// </summary>
+               void  SetOffset(int startOffset, int endOffset);
+               
+               
+               /// <summary>Returns this Token's ending offset, one greater than the position of the
+               /// last character corresponding to this token in the source text. The length
+               /// of the token in the source text is (endOffset - startOffset). 
+               /// </summary>
+               int EndOffset();
+       }
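+       
+       // Worked example of the offset semantics above: for the source text "foo bar",
+       // the token "bar" has StartOffset() == 4 and EndOffset() == 7, so its length in
+       // the source text is EndOffset() - StartOffset() == 3.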
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/OffsetAttributeImpl.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/OffsetAttributeImpl.cs
new file mode 100644 (file)
index 0000000..6f1106c
--- /dev/null
@@ -0,0 +1,107 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using AttributeImpl = Mono.Lucene.Net.Util.AttributeImpl;
+
+namespace Mono.Lucene.Net.Analysis.Tokenattributes
+{
+       
+       /// <summary> The start and end character offset of a Token. </summary>
+       [Serializable]
+       public class OffsetAttributeImpl:AttributeImpl, OffsetAttribute, System.ICloneable
+       {
+               private int startOffset;
+               private int endOffset;
+               
+               /// <summary>Returns this Token's starting offset, the position of the first character
+               /// corresponding to this token in the source text.
+               /// Note that the difference between endOffset() and startOffset() may not be
+               /// equal to termText.length(), as the term text may have been altered by a
+               /// stemmer or some other filter. 
+               /// </summary>
+               public virtual int StartOffset()
+               {
+                       return startOffset;
+               }
+
+
+        /// <summary>Set the starting and ending offset.
+        /// See StartOffset() and EndOffset()
+        /// </summary>
+               public virtual void  SetOffset(int startOffset, int endOffset)
+               {
+                       this.startOffset = startOffset;
+                       this.endOffset = endOffset;
+               }
+               
+               
+               /// <summary>Returns this Token's ending offset, one greater than the position of the
+               /// last character corresponding to this token in the source text. The length
+               /// of the token in the source text is (endOffset - startOffset). 
+               /// </summary>
+               public virtual int EndOffset()
+               {
+                       return endOffset;
+               }
+               
+               
+               public override void  Clear()
+               {
+                       startOffset = 0;
+                       endOffset = 0;
+               }
+               
+               public  override bool Equals(System.Object other)
+               {
+                       if (other == this)
+                       {
+                               return true;
+                       }
+                       
+                       if (other is OffsetAttributeImpl)
+                       {
+                               OffsetAttributeImpl o = (OffsetAttributeImpl) other;
+                               return o.startOffset == startOffset && o.endOffset == endOffset;
+                       }
+                       
+                       return false;
+               }
+               
+               public override int GetHashCode()
+               {
+                       int code = startOffset;
+                       code = code * 31 + endOffset;
+                       return code;
+               }
+               
+               public override void  CopyTo(AttributeImpl target)
+               {
+                       OffsetAttribute t = (OffsetAttribute) target;
+                       t.SetOffset(startOffset, endOffset);
+               }
+               
+               override public System.Object Clone()
+               {
+            OffsetAttributeImpl impl = new OffsetAttributeImpl();
+            impl.endOffset = endOffset;
+            impl.startOffset = startOffset;
+            return impl;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PayloadAttribute.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PayloadAttribute.cs
new file mode 100644 (file)
index 0000000..6fcc559
--- /dev/null
@@ -0,0 +1,35 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Payload = Mono.Lucene.Net.Index.Payload;
+using Attribute = Mono.Lucene.Net.Util.Attribute;
+
+namespace Mono.Lucene.Net.Analysis.Tokenattributes
+{
+       
+       /// <summary> The payload of a Token. See also {@link Payload}.</summary>
+       public interface PayloadAttribute:Attribute
+       {
+               /// <summary> Returns this Token's payload.</summary>
+               Payload GetPayload();
+               
+               /// <summary> Sets this Token's payload.</summary>
+               void  SetPayload(Payload payload);
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PayloadAttributeImpl.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PayloadAttributeImpl.cs
new file mode 100644 (file)
index 0000000..4da11db
--- /dev/null
@@ -0,0 +1,99 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Payload = Mono.Lucene.Net.Index.Payload;
+using AttributeImpl = Mono.Lucene.Net.Util.AttributeImpl;
+
+namespace Mono.Lucene.Net.Analysis.Tokenattributes
+{
+       
+       /// <summary> The payload of a Token. See also {@link Payload}.</summary>
+       [Serializable]
+       public class PayloadAttributeImpl:AttributeImpl, PayloadAttribute, System.ICloneable
+       {
+               private Payload payload;
+               
+               /// <summary> Initialize this attribute with no payload.</summary>
+               public PayloadAttributeImpl()
+               {
+               }
+               
+               /// <summary> Initialize this attribute with the given payload. </summary>
+               public PayloadAttributeImpl(Payload payload)
+               {
+                       this.payload = payload;
+               }
+               
+               /// <summary> Returns this Token's payload.</summary>
+               public virtual Payload GetPayload()
+               {
+                       return this.payload;
+               }
+               
+               /// <summary> Sets this Token's payload.</summary>
+               public virtual void  SetPayload(Payload payload)
+               {
+                       this.payload = payload;
+               }
+               
+               public override void  Clear()
+               {
+                       payload = null;
+               }
+               
+               public override System.Object Clone()
+               {
+            PayloadAttributeImpl impl = new PayloadAttributeImpl();
+            // The payload may be null (see the parameterless constructor), so guard the
+            // copy; this mirrors the null handling in CopyTo below.
+            impl.payload = (payload == null) ? null : (Payload) payload.Clone();
+            return impl;
+               }
+               
+               public  override bool Equals(System.Object other)
+               {
+                       if (other == this)
+                       {
+                               return true;
+                       }
+                       
+                       if (other is PayloadAttributeImpl)
+                       {
+                               PayloadAttributeImpl o = (PayloadAttributeImpl) other;
+                               if (o.payload == null || payload == null)
+                               {
+                                       return o.payload == null && payload == null;
+                               }
+                               
+                               return o.payload.Equals(payload);
+                       }
+                       
+                       return false;
+               }
+               
+               public override int GetHashCode()
+               {
+                       return (payload == null)?0:payload.GetHashCode();
+               }
+               
+               public override void  CopyTo(AttributeImpl target)
+               {
+                       PayloadAttribute t = (PayloadAttribute) target;
+                       t.SetPayload((payload == null)?null:(Payload) payload.Clone());
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs
new file mode 100644 (file)
index 0000000..163611b
--- /dev/null
@@ -0,0 +1,66 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Attribute = Mono.Lucene.Net.Util.Attribute;
+
+namespace Mono.Lucene.Net.Analysis.Tokenattributes
+{
+       
+       /// <summary>The positionIncrement determines the position of this token
+       /// relative to the previous Token in a TokenStream, used in phrase
+       /// searching.
+       /// 
+       /// <p/>The default value is one.
+       /// 
+       /// <p/>Some common uses for this are:<ul>
+       /// 
+       /// <li>Set it to zero to put multiple terms in the same position.  This is
+       /// useful if, e.g., a word has multiple stems.  Searches for phrases
+       /// including either stem will match.  In this case, all but the first stem's
+       /// increment should be set to zero: the increment of the first instance
+       /// should be one.  Repeating a token with an increment of zero can also be
+       /// used to boost the scores of matches on that token.</li>
+       /// 
+       /// <li>Set it to values greater than one to inhibit exact phrase matches.
+       /// If, for example, one does not want phrases to match across removed stop
+       /// words, then one could build a stop word filter that removes stop words and
+       /// also sets the increment to the number of stop words removed before each
+       /// non-stop word.  Then exact phrase queries will only match when the terms
+       /// occur with no intervening stop words.</li>
+       /// 
+       /// </ul>
+       /// 
+       /// </summary>
+       /// <seealso cref="Mono.Lucene.Net.Index.TermPositions">
+       /// </seealso>
+       public interface PositionIncrementAttribute:Attribute
+       {
+               /// <summary>Set the position increment. The default value is one.
+               /// 
+               /// </summary>
+               /// <param name="positionIncrement">the distance from the prior term
+               /// </param>
+               void  SetPositionIncrement(int positionIncrement);
+               
+               /// <summary>Returns the position increment of this Token.</summary>
+               /// <seealso cref="SetPositionIncrement">
+               /// </seealso>
+               int GetPositionIncrement();
+       }
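+       
+       // Hypothetical sketch of the stem/synonym case above: a custom TokenFilter emits
+       // a second term at the same position by zeroing its increment ("termAtt" and
+       // "posIncrAtt" are assumed attribute references held by the filter).
+       //
+       //     termAtt.SetTermBuffer(synonym);          // replace the term text
+       //     posIncrAtt.SetPositionIncrement(0);      // stack on the previous position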
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs
new file mode 100644 (file)
index 0000000..951a063
--- /dev/null
@@ -0,0 +1,113 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TokenStream = Mono.Lucene.Net.Analysis.TokenStream;
+using AttributeImpl = Mono.Lucene.Net.Util.AttributeImpl;
+
+namespace Mono.Lucene.Net.Analysis.Tokenattributes
+{
+       
+       /// <summary>The positionIncrement determines the position of this token
+       /// relative to the previous Token in a {@link TokenStream}, used in phrase
+       /// searching.
+       /// 
+       /// <p/>The default value is one.
+       /// 
+       /// <p/>Some common uses for this are:<ul>
+       /// 
+       /// <li>Set it to zero to put multiple terms in the same position.  This is
+       /// useful if, e.g., a word has multiple stems.  Searches for phrases
+       /// including either stem will match.  In this case, all but the first stem's
+       /// increment should be set to zero: the increment of the first instance
+       /// should be one.  Repeating a token with an increment of zero can also be
+       /// used to boost the scores of matches on that token.</li>
+       /// 
+       /// <li>Set it to values greater than one to inhibit exact phrase matches.
+       /// If, for example, one does not want phrases to match across removed stop
+       /// words, then one could build a stop word filter that removes stop words and
+       /// also sets the increment to the number of stop words removed before each
+       /// non-stop word.  Then exact phrase queries will only match when the terms
+       /// occur with no intervening stop words.</li>
+       /// 
+       /// </ul>
+       /// </summary>
+       [Serializable]
+       public class PositionIncrementAttributeImpl:AttributeImpl, PositionIncrementAttribute, System.ICloneable
+       {
+               private int positionIncrement = 1;
+               
+               /// <summary>Set the position increment. The default value is one.
+               /// 
+               /// </summary>
+               /// <param name="positionIncrement">the distance from the prior term
+               /// </param>
+               public virtual void  SetPositionIncrement(int positionIncrement)
+               {
+                       if (positionIncrement < 0)
+                               throw new System.ArgumentException("Increment must be zero or greater: " + positionIncrement);
+                       this.positionIncrement = positionIncrement;
+               }
+               
+               /// <summary>Returns the position increment of this Token.</summary>
+               /// <seealso cref="SetPositionIncrement">
+               /// </seealso>
+               public virtual int GetPositionIncrement()
+               {
+                       return positionIncrement;
+               }
+               
+               public override void  Clear()
+               {
+                       this.positionIncrement = 1;
+               }
+               
+               public  override bool Equals(System.Object other)
+               {
+                       if (other == this)
+                       {
+                               return true;
+                       }
+                       
+                       if (other is PositionIncrementAttributeImpl)
+                       {
+                               return positionIncrement == ((PositionIncrementAttributeImpl) other).positionIncrement;
+                       }
+                       
+                       return false;
+               }
+               
+               public override int GetHashCode()
+               {
+                       return positionIncrement;
+               }
+               
+               public override void  CopyTo(AttributeImpl target)
+               {
+                       PositionIncrementAttribute t = (PositionIncrementAttribute) target;
+                       t.SetPositionIncrement(positionIncrement);
+               }
+               
+               override public System.Object Clone()
+               {
+            PositionIncrementAttributeImpl impl = new PositionIncrementAttributeImpl();
+            impl.positionIncrement = positionIncrement;
+            return impl;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TermAttribute.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TermAttribute.cs
new file mode 100644 (file)
index 0000000..8a7d851
--- /dev/null
@@ -0,0 +1,105 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Attribute = Mono.Lucene.Net.Util.Attribute;
+
+namespace Mono.Lucene.Net.Analysis.Tokenattributes
+{
+       
+       /// <summary> The term text of a Token.</summary>
+       public interface TermAttribute:Attribute
+       {
+               /// <summary>Returns the Token's term text.
+               /// 
+               /// This method has a performance penalty
+               /// because the text is stored internally in a char[].  If
+               /// possible, use {@link #TermBuffer()} and {@link
+               /// #TermLength()} directly instead.  If you really need a
+               /// String, use this method, which is nothing more than
+               /// a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
+               /// </summary>
+               System.String Term();
+               
+               /// <summary>Copies the contents of buffer, starting at offset for
+               /// length characters, into the termBuffer array.
+               /// </summary>
+               /// <param name="buffer">the buffer to copy
+               /// </param>
+               /// <param name="offset">the index in the buffer of the first character to copy
+               /// </param>
+               /// <param name="length">the number of characters to copy
+               /// </param>
+               void  SetTermBuffer(char[] buffer, int offset, int length);
+               
+               /// <summary>Copies the contents of buffer into the termBuffer array.</summary>
+               /// <param name="buffer">the buffer to copy
+               /// </param>
+               void  SetTermBuffer(System.String buffer);
+               
+               /// <summary>Copies the contents of buffer, starting at offset and continuing
+               /// for length characters, into the termBuffer array.
+               /// </summary>
+               /// <param name="buffer">the buffer to copy
+               /// </param>
+               /// <param name="offset">the index in the buffer of the first character to copy
+               /// </param>
+               /// <param name="length">the number of characters to copy
+               /// </param>
+               void  SetTermBuffer(System.String buffer, int offset, int length);
+               
+               /// <summary>Returns the internal termBuffer character array which
+               /// you can then directly alter.  If the array is too
+               /// small for your token, use {@link
+               /// #ResizeTermBuffer(int)} to increase it.  After
+               /// altering the buffer be sure to call {@link
+               /// #setTermLength} to record the number of valid
+               /// characters that were placed into the termBuffer. 
+               /// </summary>
+               char[] TermBuffer();
+               
+               /// <summary>Grows the termBuffer to at least size newSize, preserving the
+               /// existing content. Note: If the next operation is to change
+               /// the contents of the term buffer use
+               /// {@link #SetTermBuffer(char[], int, int)},
+               /// {@link #SetTermBuffer(String)}, or
+               /// {@link #SetTermBuffer(String, int, int)}
+               /// to optimally combine the resize with the setting of the termBuffer.
+               /// </summary>
+               /// <param name="newSize">minimum size of the new termBuffer
+               /// </param>
+               /// <returns> newly created termBuffer with length >= newSize
+               /// </returns>
+               char[] ResizeTermBuffer(int newSize);
+               
+               /// <summary>Return number of valid characters (length of the term)
+               /// in the termBuffer array. 
+               /// </summary>
+               int TermLength();
+               
+               /// <summary>Set number of valid characters (length of the term) in
+               /// the termBuffer array. Use this to truncate the termBuffer
+               /// or to synchronize with external manipulation of the termBuffer.
+               /// Note: to grow the size of the array,
+               /// use {@link #ResizeTermBuffer(int)} first.
+               /// </summary>
+               /// <param name="length">the truncated length
+               /// </param>
+               void  SetTermLength(int length);
+       }
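+       
+       // Sketch of the in-place buffer pattern described above ("termAtt" is an assumed
+       // attribute reference; upper-casing is just a stand-in transformation):
+       //
+       //     char[] buffer = termAtt.TermBuffer();
+       //     int length = termAtt.TermLength();
+       //     for (int i = 0; i < length; i++)
+       //         buffer[i] = System.Char.ToUpper(buffer[i]);
+       //     termAtt.SetTermLength(length);           // record the number of valid chars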
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TermAttributeImpl.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TermAttributeImpl.cs
new file mode 100644 (file)
index 0000000..858952b
--- /dev/null
@@ -0,0 +1,265 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using ArrayUtil = Mono.Lucene.Net.Util.ArrayUtil;
+using AttributeImpl = Mono.Lucene.Net.Util.AttributeImpl;
+
+namespace Mono.Lucene.Net.Analysis.Tokenattributes
+{
+       
+       /// <summary> The term text of a Token.</summary>
+       [Serializable]
+       public class TermAttributeImpl:AttributeImpl, TermAttribute, System.ICloneable
+       {
+               private static int MIN_BUFFER_SIZE = 10;
+               
+               private char[] termBuffer;
+               private int termLength;
+               
+               /// <summary>Returns the Token's term text.
+               /// 
+               /// This method has a performance penalty
+               /// because the text is stored internally in a char[].  If
+               /// possible, use {@link #TermBuffer()} and {@link
+               /// #TermLength()} directly instead.  If you really need a
+               /// String, use this method, which is nothing more than
+               /// a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
+               /// </summary>
+               public virtual System.String Term()
+               {
+                       InitTermBuffer();
+                       return new System.String(termBuffer, 0, termLength);
+               }
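+
+               // A usage sketch (editorial addition, not upstream code): per the note
+               // above, hot paths should prefer the buffer/length pair over Term() to
+               // avoid the String allocation. "attr" is a hypothetical instance:
+               //
+               //   char[] buf = attr.TermBuffer();
+               //   for (int i = 0; i < attr.TermLength(); i++)
+               //       System.Console.Write(buf[i]);        // no String allocated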
+               
+               /// <summary>Copies the contents of buffer, starting at offset for
+               /// length characters, into the termBuffer array.
+               /// </summary>
+               /// <param name="buffer">the buffer to copy
+               /// </param>
+               /// <param name="offset">the index in the buffer of the first character to copy
+               /// </param>
+               /// <param name="length">the number of characters to copy
+               /// </param>
+               public virtual void  SetTermBuffer(char[] buffer, int offset, int length)
+               {
+                       GrowTermBuffer(length);
+                       Array.Copy(buffer, offset, termBuffer, 0, length);
+                       termLength = length;
+               }
+               
+               /// <summary>Copies the contents of buffer into the termBuffer array.</summary>
+               /// <param name="buffer">the buffer to copy
+               /// </param>
+               public virtual void  SetTermBuffer(System.String buffer)
+               {
+                       int length = buffer.Length;
+                       GrowTermBuffer(length);
+                       SupportClass.TextSupport.GetCharsFromString(buffer, 0, length, termBuffer, 0);
+                       termLength = length;
+               }
+               
+               /// <summary>Copies the contents of buffer, starting at offset and continuing
+               /// for length characters, into the termBuffer array.
+               /// </summary>
+               /// <param name="buffer">the buffer to copy
+               /// </param>
+               /// <param name="offset">the index in the buffer of the first character to copy
+               /// </param>
+               /// <param name="length">the number of characters to copy
+               /// </param>
+               public virtual void  SetTermBuffer(System.String buffer, int offset, int length)
+               {
+                       System.Diagnostics.Debug.Assert(offset <= buffer.Length);
+                       System.Diagnostics.Debug.Assert(offset + length <= buffer.Length);
+                       GrowTermBuffer(length);
+                       SupportClass.TextSupport.GetCharsFromString(buffer, offset, offset + length, termBuffer, 0);
+                       termLength = length;
+               }
+               
+               /// <summary>Returns the internal termBuffer character array which
+               /// you can then directly alter.  If the array is too
+               /// small for your token, use {@link
+               /// #ResizeTermBuffer(int)} to increase it.  After
+               /// altering the buffer be sure to call {@link
+               /// #setTermLength} to record the number of valid
+               /// characters that were placed into the termBuffer. 
+               /// </summary>
+               public virtual char[] TermBuffer()
+               {
+                       InitTermBuffer();
+                       return termBuffer;
+               }
+               
+               /// <summary>Grows the termBuffer to at least size newSize, preserving the
+               /// existing content. Note: If the next operation is to change
+               /// the contents of the term buffer use
+               /// {@link #SetTermBuffer(char[], int, int)},
+               /// {@link #SetTermBuffer(String)}, or
+               /// {@link #SetTermBuffer(String, int, int)}
+               /// to optimally combine the resize with the setting of the termBuffer.
+               /// </summary>
+               /// <param name="newSize">minimum size of the new termBuffer
+               /// </param>
+               /// <returns> newly created termBuffer with length >= newSize
+               /// </returns>
+               public virtual char[] ResizeTermBuffer(int newSize)
+               {
+                       if (termBuffer == null)
+                       {
+                               // The buffer is always at least MIN_BUFFER_SIZE
+                               termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize)];
+                       }
+                       else
+                       {
+                               if (termBuffer.Length < newSize)
+                               {
+                                       // Not big enough; create a new array with slight
+                                       // over allocation and preserve content
+                                       char[] newCharBuffer = new char[ArrayUtil.GetNextSize(newSize)];
+                                       Array.Copy(termBuffer, 0, newCharBuffer, 0, termBuffer.Length);
+                                       termBuffer = newCharBuffer;
+                               }
+                       }
+                       return termBuffer;
+               }
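+
+               // A sketch of the intended trade-off (editorial addition, not upstream
+               // code): ResizeTermBuffer preserves content, so it suits in-place
+               // edits; when the content is replaced outright, one SetTermBuffer call
+               // combines the resize and the copy ("attr" is a hypothetical instance):
+               //
+               //   attr.SetTermBuffer("replacement");       // resize + copy in one step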
+               
+               
+               /// <summary>Allocates a buffer char[] of at least newSize, without preserving the existing content.
+               /// It is always used in places that immediately set the content.
+               /// </summary>
+               /// <param name="newSize">minimum size of the buffer
+               /// </param>
+               private void  GrowTermBuffer(int newSize)
+               {
+                       if (termBuffer == null)
+                       {
+                               // The buffer is always at least MIN_BUFFER_SIZE
+                               termBuffer = new char[ArrayUtil.GetNextSize(newSize < MIN_BUFFER_SIZE?MIN_BUFFER_SIZE:newSize)];
+                       }
+                       else
+                       {
+                               if (termBuffer.Length < newSize)
+                               {
+                                       // Not big enough; create a new array with slight
+                                       // over allocation:
+                                       termBuffer = new char[ArrayUtil.GetNextSize(newSize)];
+                               }
+                       }
+               }
+               
+               private void  InitTermBuffer()
+               {
+                       if (termBuffer == null)
+                       {
+                               termBuffer = new char[ArrayUtil.GetNextSize(MIN_BUFFER_SIZE)];
+                               termLength = 0;
+                       }
+               }
+               
+               /// <summary>Return number of valid characters (length of the term)
+               /// in the termBuffer array. 
+               /// </summary>
+               public virtual int TermLength()
+               {
+                       return termLength;
+               }
+               
+               /// <summary>Set number of valid characters (length of the term) in
+               /// the termBuffer array. Use this to truncate the termBuffer
+               /// or to synchronize with external manipulation of the termBuffer.
+               /// Note: to grow the size of the array,
+               /// use {@link #ResizeTermBuffer(int)} first.
+               /// </summary>
+               /// <param name="length">the truncated length
+               /// </param>
+               public virtual void  SetTermLength(int length)
+               {
+                       InitTermBuffer();
+                       if (length > termBuffer.Length)
+                               throw new System.ArgumentException("length " + length + " exceeds the size of the termBuffer (" + termBuffer.Length + ")");
+                       termLength = length;
+               }
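+
+               // A truncation sketch (editorial addition, not upstream code),
+               // assuming "attr" holds the nine characters of "wildcard*":
+               //
+               //   attr.SetTermBuffer("wildcard*");
+               //   attr.SetTermLength(8);                   // term is now "wildcard"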
+               
+               public override int GetHashCode()
+               {
+                       InitTermBuffer();
+                       int code = termLength;
+                       code = code * 31 + ArrayUtil.HashCode(termBuffer, 0, termLength);
+                       return code;
+               }
+               
+               public override void  Clear()
+               {
+                       termLength = 0;
+               }
+               
+               public override System.Object Clone()
+               {
+                       TermAttributeImpl t = (TermAttributeImpl) base.Clone();
+                       // Do a deep clone
+                       if (termBuffer != null)
+                       {
+                               t.termBuffer = new char[termBuffer.Length];
+                               termBuffer.CopyTo(t.termBuffer, 0);
+                       }
+                       return t;
+               }
+               
+               public  override bool Equals(System.Object other)
+               {
+                       if (other == this)
+                       {
+                               return true;
+                       }
+                       
+                       if (other is TermAttribute)
+                       {
+                               InitTermBuffer();
+                               TermAttributeImpl o = ((TermAttributeImpl) other);
+                               o.InitTermBuffer();
+                               
+                               if (termLength != o.termLength)
+                                       return false;
+                               for (int i = 0; i < termLength; i++)
+                               {
+                                       if (termBuffer[i] != o.termBuffer[i])
+                                       {
+                                               return false;
+                                       }
+                               }
+                               return true;
+                       }
+                       
+                       return false;
+               }
+               
+               public override System.String ToString()
+               {
+                       InitTermBuffer();
+                       return "term=" + new System.String(termBuffer, 0, termLength);
+               }
+               
+               public override void  CopyTo(AttributeImpl target)
+               {
+                       InitTermBuffer();
+                       TermAttribute t = (TermAttribute) target;
+                       t.SetTermBuffer(termBuffer, 0, termLength);
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TypeAttribute.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TypeAttribute.cs
new file mode 100644 (file)
index 0000000..bd11b69
--- /dev/null
@@ -0,0 +1,36 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Attribute = Mono.Lucene.Net.Util.Attribute;
+
+namespace Mono.Lucene.Net.Analysis.Tokenattributes
+{
+       
+       /// <summary> A Token's lexical type. The default value is "word". </summary>
+       public interface TypeAttribute:Attribute
+       {
+               /// <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
+               System.String Type();
+               
+               /// <summary>Set the lexical type.</summary>
+               /// <seealso cref="Type()">
+               /// </seealso>
+               void  SetType(System.String type);
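+
+               // A usage sketch (editorial addition, not upstream code): a filter
+               // might retag tokens, where "typeAtt" is a hypothetical instance:
+               //
+               //   if (typeAtt.Type() == "word")
+               //       typeAtt.SetType("keyword");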
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TypeAttributeImpl.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TypeAttributeImpl.cs
new file mode 100644 (file)
index 0000000..9e77fed
--- /dev/null
@@ -0,0 +1,93 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using AttributeImpl = Mono.Lucene.Net.Util.AttributeImpl;
+
+namespace Mono.Lucene.Net.Analysis.Tokenattributes
+{
+       
+       /// <summary> A Token's lexical type. The default value is "word". </summary>
+       [Serializable]
+       public class TypeAttributeImpl:AttributeImpl, TypeAttribute, System.ICloneable
+       {
+               private System.String type;
+               public const System.String DEFAULT_TYPE = "word";
+               
+               public TypeAttributeImpl():this(DEFAULT_TYPE)
+               {
+               }
+               
+               public TypeAttributeImpl(System.String type)
+               {
+                       this.type = type;
+               }
+               
+               /// <summary>Returns this Token's lexical type.  Defaults to "word". </summary>
+               public virtual System.String Type()
+               {
+                       return type;
+               }
+               
+               /// <summary>Set the lexical type.</summary>
+               /// <seealso cref="Type()">
+               /// </seealso>
+               public virtual void  SetType(System.String type)
+               {
+                       this.type = type;
+               }
+               
+               public override void  Clear()
+               {
+                       type = DEFAULT_TYPE;
+               }
+               
+               public  override bool Equals(System.Object other)
+               {
+                       if (other == this)
+                       {
+                               return true;
+                       }
+                       
+                       if (other is TypeAttributeImpl)
+                       {
+                               return type.Equals(((TypeAttributeImpl) other).type);
+                       }
+                       
+                       return false;
+               }
+               
+               public override int GetHashCode()
+               {
+                       return type.GetHashCode();
+               }
+               
+               public override void  CopyTo(AttributeImpl target)
+               {
+                       TypeAttribute t = (TypeAttribute) target;
+                       t.SetType(type);
+               }
+               
+               public override System.Object Clone()
+               {
+                       TypeAttributeImpl impl = new TypeAttributeImpl();
+                       impl.type = type;
+                       return impl;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenizer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/Tokenizer.cs
new file mode 100644 (file)
index 0000000..9046996
--- /dev/null
@@ -0,0 +1,108 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using AttributeSource = Mono.Lucene.Net.Util.AttributeSource;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> A Tokenizer is a TokenStream whose input is a Reader.
+       /// <p/>
+       /// This is an abstract class; subclasses must override {@link #IncrementToken()}
+       /// <p/>
+       /// NOTE: Subclasses overriding {@link #IncrementToken()} must call
+       /// {@link AttributeSource#ClearAttributes()} before setting attributes.
+       /// Subclasses overriding {@link #next(Token)} must call
+       /// {@link Token#Clear()} before setting Token attributes.
+       /// </summary>
+       
+       public abstract class Tokenizer:TokenStream
+       {
+               /// <summary>The text source for this Tokenizer. </summary>
+               protected internal System.IO.TextReader input;
+               
+               /// <summary>Construct a tokenizer with null input. </summary>
+               protected internal Tokenizer()
+               {
+               }
+               
+               /// <summary>Construct a token stream processing the given input. </summary>
+               protected internal Tokenizer(System.IO.TextReader input)
+               {
+                       this.input = CharReader.Get(input);
+               }
+               
+               /// <summary>Construct a tokenizer with null input using the given AttributeFactory. </summary>
+               protected internal Tokenizer(AttributeFactory factory):base(factory)
+               {
+               }
+               
+               /// <summary>Construct a token stream processing the given input using the given AttributeFactory. </summary>
+               protected internal Tokenizer(AttributeFactory factory, System.IO.TextReader input):base(factory)
+               {
+                       this.input = CharReader.Get(input);
+               }
+               
+               /// <summary>Construct a token stream processing the given input using the given AttributeSource. </summary>
+               protected internal Tokenizer(AttributeSource source):base(source)
+               {
+               }
+               
+               /// <summary>Construct a token stream processing the given input using the given AttributeSource. </summary>
+               protected internal Tokenizer(AttributeSource source, System.IO.TextReader input):base(source)
+               {
+                       this.input = CharReader.Get(input);
+               }
+               
+               /// <summary>By default, closes the input Reader. </summary>
+               public override void  Close()
+               {
+                       if (input != null)
+                       {
+                               input.Close();
+                               // LUCENE-2387: don't hold onto Reader after close, so
+                               // GC can reclaim
+                               input = null;
+                       }
+               }
+  
+               /// <summary>Return the corrected offset. If {@link #input} is a {@link CharStream} subclass
+               /// this method calls {@link CharStream#CorrectOffset}, else returns <code>currentOff</code>.
+               /// </summary>
+               /// <param name="currentOff">offset as seen in the output
+               /// </param>
+               /// <returns> corrected offset based on the input
+               /// </returns>
+               /// <seealso cref="CharStream.CorrectOffset">
+               /// </seealso>
+               protected internal int CorrectOffset(int currentOff)
+               {
+                       return (input is CharStream)?((CharStream) input).CorrectOffset(currentOff):currentOff;
+               }
+               
+               /// <summary>Expert: Reset the tokenizer to a new reader.  Typically, an
+               /// analyzer (in its reusableTokenStream method) will use
+               /// this to re-use a previously created tokenizer. 
+               /// </summary>
+               public virtual void  Reset(System.IO.TextReader input)
+               {
+                       this.input = input;
+               }
+       }
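+
+       // A minimal subclass sketch (editorial addition, not upstream code). It
+       // follows the contract above: IncrementToken() calls ClearAttributes()
+       // before setting attributes. AddAttribute(System.Type) is assumed from
+       // AttributeSource; the exact overload in this port may differ.
+       //
+       //   public class SingleTokenTokenizer : Tokenizer
+       //   {
+       //       private Tokenattributes.TermAttribute termAtt;
+       //       private bool done;
+       //
+       //       public SingleTokenTokenizer(System.IO.TextReader input) : base(input)
+       //       {
+       //           termAtt = (Tokenattributes.TermAttribute)
+       //               AddAttribute(typeof(Tokenattributes.TermAttribute));
+       //       }
+       //
+       //       public override bool IncrementToken()
+       //       {
+       //           if (done) return false;
+       //           ClearAttributes();
+       //           termAtt.SetTermBuffer(input.ReadToEnd());
+       //           done = true;
+       //           return true;
+       //       }
+       //   }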
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/WhitespaceAnalyzer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/WhitespaceAnalyzer.cs
new file mode 100644 (file)
index 0000000..93a0d79
--- /dev/null
@@ -0,0 +1,45 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary>An Analyzer that uses {@link WhitespaceTokenizer}. </summary>
+       
+       public sealed class WhitespaceAnalyzer:Analyzer
+       {
+               public override TokenStream TokenStream(System.String fieldName, System.IO.TextReader reader)
+               {
+                       return new WhitespaceTokenizer(reader);
+               }
+               
+               public override TokenStream ReusableTokenStream(System.String fieldName, System.IO.TextReader reader)
+               {
+                       Tokenizer tokenizer = (Tokenizer) GetPreviousTokenStream();
+                       if (tokenizer == null)
+                       {
+                               tokenizer = new WhitespaceTokenizer(reader);
+                               SetPreviousTokenStream(tokenizer);
+                       }
+                       else
+                               tokenizer.Reset(reader);
+                       return tokenizer;
+               }
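+
+               // A usage sketch (editorial addition, not upstream code):
+               //
+               //   Analyzer a = new WhitespaceAnalyzer();
+               //   TokenStream ts = a.TokenStream("body",
+               //       new System.IO.StringReader("two tokens"));
+               //
+               // ReusableTokenStream additionally recycles one tokenizer per thread
+               // via GetPreviousTokenStream/SetPreviousTokenStream, as shown above.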
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/WhitespaceTokenizer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/WhitespaceTokenizer.cs
new file mode 100644 (file)
index 0000000..0e9f445
--- /dev/null
@@ -0,0 +1,54 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using AttributeSource = Mono.Lucene.Net.Util.AttributeSource;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary>A WhitespaceTokenizer is a tokenizer that divides text at whitespace.
+       /// Adjacent sequences of non-Whitespace characters form tokens. 
+       /// </summary>
+       
+       public class WhitespaceTokenizer:CharTokenizer
+       {
+               /// <summary>Construct a new WhitespaceTokenizer. </summary>
+               public WhitespaceTokenizer(System.IO.TextReader in_Renamed):base(in_Renamed)
+               {
+               }
+               
+               /// <summary>Construct a new WhitespaceTokenizer using a given {@link AttributeSource}. </summary>
+               public WhitespaceTokenizer(AttributeSource source, System.IO.TextReader in_Renamed):base(source, in_Renamed)
+               {
+               }
+               
+               /// <summary>Construct a new WhitespaceTokenizer using a given {@link Mono.Lucene.Net.Util.AttributeSource.AttributeFactory}. </summary>
+               public WhitespaceTokenizer(AttributeFactory factory, System.IO.TextReader in_Renamed):base(factory, in_Renamed)
+               {
+               }
+               
+               /// <summary>Collects only characters which do not satisfy
+               /// {@link Character#isWhitespace(char)}.
+               /// </summary>
+               protected internal override bool IsTokenChar(char c)
+               {
+                       return !System.Char.IsWhiteSpace(c);
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/WordlistLoader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Analysis/WordlistLoader.cs
new file mode 100644 (file)
index 0000000..9db9a55
--- /dev/null
@@ -0,0 +1,193 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Analysis
+{
+       
+       /// <summary> Loader for text files that represent a list of stopwords.
+       /// 
+       /// 
+       /// </summary>
+       /// <version>  $Id: WordlistLoader.java 706342 2008-10-20 17:19:29Z gsingers $
+       /// </version>
+       public class WordlistLoader
+       {
+               
+               /// <summary> Loads a text file and adds every line as an entry to a HashSet (omitting
+               /// leading and trailing whitespace). Every line of the file should contain only
+               /// one word. The words need to be in lowercase if you make use of an
+               /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+               /// 
+               /// </summary>
+               /// <param name="wordfile">File containing the wordlist
+               /// </param>
+               /// <returns> A HashSet with the file's words
+               /// </returns>
+               public static System.Collections.Hashtable GetWordSet(System.IO.FileInfo wordfile)
+               {
+                       System.Collections.Hashtable result = new System.Collections.Hashtable();
+                       System.IO.StreamReader reader = null;
+                       try
+                       {
+                               reader = new System.IO.StreamReader(wordfile.FullName, System.Text.Encoding.Default);
+                               result = GetWordSet(reader);
+                       }
+                       finally
+                       {
+                               if (reader != null)
+                                       reader.Close();
+                       }
+                       return result;
+               }
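+
+               // A usage sketch (editorial addition, not upstream code); the file
+               // name is hypothetical, and each word is assumed to be stored as a
+               // Hashtable key (as in the stock SupportClass helper):
+               //
+               //   System.Collections.Hashtable stops =
+               //       WordlistLoader.GetWordSet(new System.IO.FileInfo("stopwords.txt"));
+               //   bool isStop = stops.ContainsKey("the");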
+               
+               /// <summary> Loads a text file and adds every non-comment line as an entry to a HashSet (omitting
+               /// leading and trailing whitespace). Every line of the file should contain only
+               /// one word. The words need to be in lowercase if you make use of an
+               /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+               /// 
+               /// </summary>
+               /// <param name="wordfile">File containing the wordlist
+               /// </param>
+               /// <param name="comment">The comment string to ignore
+               /// </param>
+               /// <returns> A HashSet with the file's words
+               /// </returns>
+               public static System.Collections.Hashtable GetWordSet(System.IO.FileInfo wordfile, System.String comment)
+               {
+                       System.Collections.Hashtable result = new System.Collections.Hashtable();
+                       System.IO.StreamReader reader = null;
+                       try
+                       {
+                               reader = new System.IO.StreamReader(wordfile.FullName, System.Text.Encoding.Default);
+                               result = GetWordSet(reader, comment);
+                       }
+                       finally
+                       {
+                               if (reader != null)
+                                       reader.Close();
+                       }
+                       return result;
+               }
+               
+               
+               /// <summary> Reads lines from a Reader and adds every line as an entry to a HashSet (omitting
+               /// leading and trailing whitespace). Every line of the Reader should contain only
+               /// one word. The words need to be in lowercase if you make use of an
+               /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+               /// 
+               /// </summary>
+               /// <param name="reader">Reader containing the wordlist
+               /// </param>
+               /// <returns> A HashSet with the reader's words
+               /// </returns>
+               public static System.Collections.Hashtable GetWordSet(System.IO.TextReader reader)
+               {
+                       System.Collections.Hashtable result = new System.Collections.Hashtable();
+                       // The local "br" reader in the original port was never assigned,
+                       // so its try/finally was dead code; the caller owns "reader" and
+                       // is responsible for closing it.
+                       System.String word = null;
+                       while ((word = reader.ReadLine()) != null)
+                       {
+                               SupportClass.CollectionsHelper.Add(result, word.Trim());
+                       }
+                       return result;
+               }
+               
+               /// <summary> Reads lines from a Reader and adds every non-comment line as an entry to a HashSet (omitting
+               /// leading and trailing whitespace). Every line of the Reader should contain only
+               /// one word. The words need to be in lowercase if you make use of an
+               /// Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
+               /// 
+               /// </summary>
+               /// <param name="reader">Reader containing the wordlist
+               /// </param>
+               /// <param name="comment">The string representing a comment.
+               /// </param>
+               /// <returns> A HashSet with the reader's words
+               /// </returns>
+               public static System.Collections.Hashtable GetWordSet(System.IO.TextReader reader, System.String comment)
+               {
+                       System.Collections.Hashtable result = new System.Collections.Hashtable();
+                       // As above, the unassigned "br" and its try/finally were dead
+                       // code; the caller owns "reader" and is responsible for closing it.
+                       System.String word = null;
+                       while ((word = reader.ReadLine()) != null)
+                       {
+                               if (word.StartsWith(comment) == false)
+                               {
+                                       SupportClass.CollectionsHelper.Add(result, word.Trim());
+                               }
+                       }
+                       return result;
+               }
+               
+               
+               
+               /// <summary> Reads a stem dictionary. Each line contains:
+               /// <pre>word<b>\t</b>stem</pre>
+               /// (i.e. two tab-separated words)
+               /// 
+               /// </summary>
+               /// <returns> stem dictionary that overrules the stemming algorithm
+               /// </returns>
+               /// <throws>  IOException  </throws>
+               public static System.Collections.Hashtable GetStemDict(System.IO.FileInfo wordstemfile)
+               {
+                       if (wordstemfile == null)
+                               throw new System.NullReferenceException("wordstemfile may not be null");
+                       System.Collections.Hashtable result = new System.Collections.Hashtable();
+                       System.IO.StreamReader br = null;
+                       System.IO.StreamReader fr = null;
+                       try
+                       {
+                               fr = new System.IO.StreamReader(wordstemfile.FullName, System.Text.Encoding.Default);
+                               br = new System.IO.StreamReader(fr.BaseStream, fr.CurrentEncoding);
+                               System.String line;
+                char[] tab = {'\t'};
+                               while ((line = br.ReadLine()) != null)
+                               {
+                                       System.String[] wordstem = line.Split(tab, 2);
+                                       result[wordstem[0]] = wordstem[1];
+                               }
+                       }
+                       finally
+                       {
+                               if (fr != null)
+                                       fr.Close();
+                               if (br != null)
+                                       br.Close();
+                       }
+                       return result;
+               }
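+
+               // A usage sketch (editorial addition, not upstream code): given a
+               // hypothetical file whose lines look like "running<TAB>run", the
+               // returned table maps each word to its stem:
+               //
+               //   System.Collections.Hashtable stems =
+               //       WordlistLoader.GetStemDict(new System.IO.FileInfo("stems.txt"));
+               //   System.String stem = (System.String) stems["running"];  // "run"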
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/.gitattributes b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/.gitattributes
new file mode 100644 (file)
index 0000000..5f411d8
--- /dev/null
@@ -0,0 +1,15 @@
+/AbstractField.cs -crlf
+/CompressionTools.cs -crlf
+/DateField.cs -crlf
+/DateTools.cs -crlf
+/Document.cs -crlf
+/Field.cs -crlf
+/FieldSelector.cs -crlf
+/FieldSelectorResult.cs -crlf
+/Fieldable.cs -crlf
+/LoadFirstFieldSelector.cs -crlf
+/MapFieldSelector.cs -crlf
+/NumberTools.cs -crlf
+/NumericField.cs -crlf
+/Package.html -crlf
+/SetBasedFieldSelector.cs -crlf
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/AbstractField.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/AbstractField.cs
new file mode 100644 (file)
index 0000000..1f68066
--- /dev/null
@@ -0,0 +1,472 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TokenStream = Mono.Lucene.Net.Analysis.TokenStream;
+using StringHelper = Mono.Lucene.Net.Util.StringHelper;
+using PhraseQuery = Mono.Lucene.Net.Search.PhraseQuery;
+using SpanQuery = Mono.Lucene.Net.Search.Spans.SpanQuery;
+
+namespace Mono.Lucene.Net.Documents
+{
+       
+       
+       /// <summary> Base class for the concrete Field implementations: holds the
+       /// flags (stored, indexed, tokenized, term vectors, ...) and the field
+       /// data shared by all {@link Fieldable} implementations.
+       /// </summary>
+       [Serializable]
+       public abstract class AbstractField : Fieldable
+       {
+               
+               protected internal System.String name = "body";
+               protected internal bool storeTermVector = false;
+               protected internal bool storeOffsetWithTermVector = false;
+               protected internal bool storePositionWithTermVector = false;
+               protected internal bool omitNorms = false;
+               protected internal bool isStored = false;
+               protected internal bool isIndexed = true;
+               protected internal bool isTokenized = true;
+               protected internal bool isBinary = false;
+               protected internal bool isCompressed = false;
+               protected internal bool lazy = false;
+               protected internal bool omitTermFreqAndPositions = false;
+               protected internal float boost = 1.0f;
+               // the data object for all different kind of field values
+               protected internal System.Object fieldsData = null;
+               // pre-analyzed tokenStream for indexed fields
+               protected internal TokenStream tokenStream;
+               // length/offset for all primitive types
+               protected internal int binaryLength;
+               protected internal int binaryOffset;
+               
+               protected internal AbstractField()
+               {
+               }
+               
+               protected internal AbstractField(System.String name, Field.Store store, Field.Index index, Field.TermVector termVector)
+               {
+                       if (name == null)
+                               throw new System.NullReferenceException("name cannot be null");
+                       this.name = StringHelper.Intern(name); // field names are interned
+                       
+                       if (store == Field.Store.YES)
+                       {
+                               this.isStored = true;
+                               this.isCompressed = false;
+                       }
+                       else if (store == Field.Store.COMPRESS)
+                       {
+                               this.isStored = true;
+                               this.isCompressed = true;
+                       }
+                       else if (store == Field.Store.NO)
+                       {
+                               this.isStored = false;
+                               this.isCompressed = false;
+                       }
+                       else
+                       {
+                               throw new System.ArgumentException("unknown store parameter " + store);
+                       }
+                       
+                       if (index == Field.Index.NO)
+                       {
+                               this.isIndexed = false;
+                               this.isTokenized = false;
+                       }
+                       else if (index == Field.Index.ANALYZED)
+                       {
+                               this.isIndexed = true;
+                               this.isTokenized = true;
+                       }
+                       else if (index == Field.Index.NOT_ANALYZED)
+                       {
+                               this.isIndexed = true;
+                               this.isTokenized = false;
+                       }
+                       else if (index == Field.Index.NOT_ANALYZED_NO_NORMS)
+                       {
+                               this.isIndexed = true;
+                               this.isTokenized = false;
+                               this.omitNorms = true;
+                       }
+                       else if (index == Field.Index.ANALYZED_NO_NORMS)
+                       {
+                               this.isIndexed = true;
+                               this.isTokenized = true;
+                               this.omitNorms = true;
+                       }
+                       else
+                       {
+                               throw new System.ArgumentException("unknown index parameter " + index);
+                       }
+                       
+                       this.isBinary = false;
+                       
+                       SetStoreTermVector(termVector);
+               }
+               
+               /// <summary>Sets the boost factor for hits on this field.  This value will be
+               /// multiplied into the score of all hits on this field of this
+               /// document.
+               /// 
+               /// <p/>The boost is multiplied by {@link Mono.Lucene.Net.Documents.Document#GetBoost()} of the document
+               /// containing this field.  If a document has multiple fields with the same
+               /// name, all such values are multiplied together.  This product is then
+               /// used to compute the norm factor for the field.  By
+               /// default, in the {@link
+               /// Mono.Lucene.Net.Search.Similarity#ComputeNorm(String,
+               /// FieldInvertState)} method, the boost value is multiplied
+               /// by the {@link
+               /// Mono.Lucene.Net.Search.Similarity#LengthNorm(String,
+               /// int)} and then
+               /// rounded by {@link Mono.Lucene.Net.Search.Similarity#EncodeNorm(float)} before it is stored in the
+               /// index.  One should attempt to ensure that this product does not overflow
+               /// the range of that encoding.
+               /// 
+               /// </summary>
+               /// <seealso cref="Mono.Lucene.Net.Documents.Document.SetBoost(float)">
+               /// </seealso>
+               /// <seealso cref="Mono.Lucene.Net.Search.Similarity.ComputeNorm(String, Mono.Lucene.Net.Index.FieldInvertState)">
+               /// </seealso>
+               /// <seealso cref="Mono.Lucene.Net.Search.Similarity.EncodeNorm(float)">
+               /// </seealso>
+               public virtual void  SetBoost(float boost)
+               {
+                       this.boost = boost;
+               }
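+
+               // A usage sketch (editorial addition, not upstream code): doubling the
+               // weight of hits in a hypothetical title field relative to the rest:
+               //
+               //   titleField.SetBoost(2.0f);  // folded into the field norm at index time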
+               
+               /// <summary>Returns the boost factor for hits for this field.
+               /// 
+               /// <p/>The default value is 1.0.
+               /// 
+               /// <p/>Note: this value is not stored directly with the document in the index.
+               /// Documents returned from {@link Mono.Lucene.Net.Index.IndexReader#Document(int)} and
+               /// {@link Mono.Lucene.Net.Search.Hits#Doc(int)} may thus not have the same value present as when
+               /// this field was indexed.
+               /// 
+               /// </summary>
+               /// <seealso cref="SetBoost(float)">
+               /// </seealso>
+               public virtual float GetBoost()
+               {
+                       return boost;
+               }
+               
+               /// <summary>Returns the name of the field as an interned string.
+               /// For example "date", "title", "body", ...
+               /// </summary>
+               public virtual System.String Name()
+               {
+                       return name;
+               }
+               
+               protected internal virtual void  SetStoreTermVector(Field.TermVector termVector)
+               {
+                       if (termVector == Field.TermVector.NO)
+                       {
+                               this.storeTermVector = false;
+                               this.storePositionWithTermVector = false;
+                               this.storeOffsetWithTermVector = false;
+                       }
+                       else if (termVector == Field.TermVector.YES)
+                       {
+                               this.storeTermVector = true;
+                               this.storePositionWithTermVector = false;
+                               this.storeOffsetWithTermVector = false;
+                       }
+                       else if (termVector == Field.TermVector.WITH_POSITIONS)
+                       {
+                               this.storeTermVector = true;
+                               this.storePositionWithTermVector = true;
+                               this.storeOffsetWithTermVector = false;
+                       }
+                       else if (termVector == Field.TermVector.WITH_OFFSETS)
+                       {
+                               this.storeTermVector = true;
+                               this.storePositionWithTermVector = false;
+                               this.storeOffsetWithTermVector = true;
+                       }
+                       else if (termVector == Field.TermVector.WITH_POSITIONS_OFFSETS)
+                       {
+                               this.storeTermVector = true;
+                               this.storePositionWithTermVector = true;
+                               this.storeOffsetWithTermVector = true;
+                       }
+                       else
+                       {
+                               throw new System.ArgumentException("unknown termVector parameter " + termVector);
+                       }
+               }
+               
+               /// <summary>True iff the value of the field is to be stored in the index for return
+               /// with search hits.  It is an error for this to be true if a field is
+               /// Reader-valued. 
+               /// </summary>
+               public bool IsStored()
+               {
+                       return isStored;
+               }
+               
+               /// <summary>True iff the value of the field is to be indexed, so that it may be
+               /// searched on. 
+               /// </summary>
+               public bool IsIndexed()
+               {
+                       return isIndexed;
+               }
+               
+               /// <summary>True iff the value of the field should be tokenized as text prior to
+               /// indexing.  Un-tokenized fields are indexed as a single word and may not be
+               /// Reader-valued. 
+               /// </summary>
+               public bool IsTokenized()
+               {
+                       return isTokenized;
+               }
+               
+               /// <summary>True if the value of the field is stored and compressed within the index </summary>
+               public bool IsCompressed()
+               {
+                       return isCompressed;
+               }
+               
+               /// <summary>True iff the term or terms used to index this field are stored as a term
+               /// vector, available from {@link Mono.Lucene.Net.Index.IndexReader#GetTermFreqVector(int,String)}.
+               /// These methods do not provide access to the original content of the field,
+               /// only to terms used to index it. If the original content must be
+               /// preserved, use the <code>stored</code> attribute instead.
+               /// 
+               /// </summary>
+               /// <seealso cref="Mono.Lucene.Net.Index.IndexReader.GetTermFreqVector(int, String)">
+               /// </seealso>
+               public bool IsTermVectorStored()
+               {
+                       return storeTermVector;
+               }
+               
+               /// <summary> True iff terms are stored as term vector together with their offsets 
+               /// (start and end position in source text).
+               /// </summary>
+               public virtual bool IsStoreOffsetWithTermVector()
+               {
+                       return storeOffsetWithTermVector;
+               }
+               
+               /// <summary> True iff terms are stored as term vector together with their token positions.</summary>
+               public virtual bool IsStorePositionWithTermVector()
+               {
+                       return storePositionWithTermVector;
+               }
+               
+               /// <summary>True iff the value of the field is stored as binary </summary>
+               public bool IsBinary()
+               {
+                       return isBinary;
+               }
+               
+               
+               /// <summary> Return the raw byte[] for the binary field.  Note that
+               /// you must also call {@link #getBinaryLength} and {@link
+               /// #getBinaryOffset} to know which range of bytes in this
+               /// returned array belong to the field.
+               /// </summary>
+               /// <returns> reference to the Field value as byte[].
+               /// </returns>
+               public virtual byte[] GetBinaryValue()
+               {
+                       return GetBinaryValue(null);
+               }
+               
+               public virtual byte[] GetBinaryValue(byte[] result)
+               {
+                       if (isBinary || fieldsData is byte[])
+                               return (byte[]) fieldsData;
+                       else
+                               return null;
+               }
+               
+               /// <summary> Returns the length of the byte[] segment that is used as the value;
+               /// if the Field is not binary, the returned value is undefined.
+               /// </summary>
+               /// <returns> length of byte[] segment that represents this Field value
+               /// </returns>
+               public virtual int GetBinaryLength()
+               {
+                       if (isBinary)
+                       {
+                               if (!isCompressed)
+                                       return binaryLength;
+                               else
+                                       return ((byte[]) fieldsData).Length;
+                       }
+                       else if (fieldsData is byte[])
+                               return ((byte[]) fieldsData).Length;
+                       else
+                               return 0;
+               }
+               
+               /// <summary> Returns the offset into the byte[] segment that is used as the value;
+               /// if the Field is not binary, the returned value is undefined.
+               /// </summary>
+               /// <returns> index of the first character in byte[] segment that represents this Field value
+               /// </returns>
+               public virtual int GetBinaryOffset()
+               {
+                       return binaryOffset;
+               }
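+
+               // A reading sketch (editorial addition, not upstream code): per the
+               // notes above, a binary value is the (array, offset, length) triple,
+               // so a private copy is taken like this ("f" is a hypothetical field):
+               //
+               //   byte[] raw = f.GetBinaryValue();
+               //   byte[] copy = new byte[f.GetBinaryLength()];
+               //   System.Array.Copy(raw, f.GetBinaryOffset(), copy, 0, copy.Length);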
+               
+               /// <summary>True if norms are omitted for this indexed field </summary>
+               public virtual bool GetOmitNorms()
+               {
+                       return omitNorms;
+               }
+               
+               /// <deprecated> Renamed to {@link #getOmitTermFreqAndPositions} 
+               /// </deprecated>
+        [Obsolete("Renamed to GetOmitTermFreqAndPositions")]
+               public virtual bool GetOmitTf()
+               {
+                       return omitTermFreqAndPositions;
+               }
+               
+               /// <seealso cref="setOmitTermFreqAndPositions">
+               /// </seealso>
+               public virtual bool GetOmitTermFreqAndPositions()
+               {
+                       return omitTermFreqAndPositions;
+               }
+               
+               /// <summary>Expert:
+               /// 
+               /// If set, omit normalization factors associated with this indexed field.
+               /// This effectively disables indexing boosts and length normalization for this field.
+               /// </summary>
+               public virtual void  SetOmitNorms(bool omitNorms)
+               {
+                       this.omitNorms = omitNorms;
+               }
+               
+               /// <deprecated> Renamed to {@link #setOmitTermFreqAndPositions} 
+               /// </deprecated>
+        [Obsolete("Renamed to SetOmitTermFreqAndPositions")]
+               public virtual void  SetOmitTf(bool omitTermFreqAndPositions)
+               {
+                       this.omitTermFreqAndPositions = omitTermFreqAndPositions;
+               }
+               
+               /// <summary>Expert:
+               /// 
+               /// If set, omit term freq, positions and payloads from
+               /// postings for this field.
+               /// 
+               /// <p/><b>NOTE</b>: While this option reduces storage space
+               /// required in the index, it also means any query
+               /// requiring positional information, such as {@link
+               /// PhraseQuery} or {@link SpanQuery} subclasses will
+               /// silently fail to find results.
+               /// </summary>
+               public virtual void  SetOmitTermFreqAndPositions(bool omitTermFreqAndPositions)
+               {
+                       this.omitTermFreqAndPositions = omitTermFreqAndPositions;
+               }
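+
+               // A usage sketch (editorial addition, not upstream code): the option
+               // suits fields queried only by single terms, e.g. a hypothetical
+               // identifier field; note the PhraseQuery/SpanQuery caveat above:
+               //
+               //   idField.SetOmitTermFreqAndPositions(true);  // smaller postings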
+               
+               public virtual bool IsLazy()
+               {
+                       return lazy;
+               }
+               
+               /// <summary>Prints a Field for human consumption. </summary>
+               public override System.String ToString()
+               {
+                       System.Text.StringBuilder result = new System.Text.StringBuilder();
+                       if (isStored)
+                       {
+                               result.Append("stored");
+                               if (isCompressed)
+                                       result.Append("/compressed");
+                               else
+                                       result.Append("/uncompressed");
+                       }
+                       if (isIndexed)
+                       {
+                               if (result.Length > 0)
+                                       result.Append(",");
+                               result.Append("indexed");
+                       }
+                       if (isTokenized)
+                       {
+                               if (result.Length > 0)
+                                       result.Append(",");
+                               result.Append("tokenized");
+                       }
+                       if (storeTermVector)
+                       {
+                               if (result.Length > 0)
+                                       result.Append(",");
+                               result.Append("termVector");
+                       }
+                       if (storeOffsetWithTermVector)
+                       {
+                               if (result.Length > 0)
+                                       result.Append(",");
+                               result.Append("termVectorOffsets");
+                       }
+                       if (storePositionWithTermVector)
+                       {
+                               if (result.Length > 0)
+                                       result.Append(",");
+                               result.Append("termVectorPosition");
+                       }
+                       if (isBinary)
+                       {
+                               if (result.Length > 0)
+                                       result.Append(",");
+                               result.Append("binary");
+                       }
+                       if (omitNorms)
+                       {
+                               result.Append(",omitNorms");
+                       }
+                       if (omitTermFreqAndPositions)
+                       {
+                               result.Append(",omitTermFreqAndPositions");
+                       }
+                       if (lazy)
+                       {
+                               result.Append(",lazy");
+                       }
+                       result.Append('<');
+                       result.Append(name);
+                       result.Append(':');
+                       
+                       if (fieldsData != null && lazy == false)
+                       {
+                               result.Append(fieldsData);
+                       }
+                       
+                       result.Append('>');
+                       return result.ToString();
+               }
+               public abstract Mono.Lucene.Net.Analysis.TokenStream TokenStreamValue();
+               public abstract System.IO.TextReader ReaderValue();
+               public abstract System.String StringValue();
+               public abstract byte[] BinaryValue();
+       }
+}
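
As a rough usage sketch (assuming Field's standard (name, value, store, index) constructor and the Field.Index.NOT_ANALYZED constant, both provided by this commit's Field.cs, which extends AbstractField), an identifier-like field is the typical place to apply both omissions:

    using System;
    using Mono.Lucene.Net.Documents;

    class OmitFlagsDemo
    {
        static void Main()
        {
            // Identifier fields gain nothing from norms or term frequencies,
            // so omitting both shrinks the index; positional queries such as
            // PhraseQuery will then silently find nothing on this field.
            Field id = new Field("id", "doc-42", Field.Store.YES, Field.Index.NOT_ANALYZED);
            id.SetOmitNorms(true);
            id.SetOmitTermFreqAndPositions(true);

            // ToString() reports the resulting flags, e.g.
            // "stored/uncompressed,indexed,omitNorms,omitTermFreqAndPositions<id:doc-42>"
            Console.WriteLine(id);
        }
    }
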
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/CompressionTools.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/CompressionTools.cs
new file mode 100644 (file)
index 0000000..827d86f
--- /dev/null
@@ -0,0 +1,152 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/// To enable compression support in Mono.Lucene.Net,
+/// you will need to define 'SHARP_ZIP_LIB' and reference the SharpZipLib
+/// library.  The SharpZipLib library can be downloaded from:
+/// http://www.icsharpcode.net/OpenSource/SharpZipLib/
+
+using System;
+
+using UnicodeUtil = Mono.Lucene.Net.Util.UnicodeUtil;
+
+namespace Mono.Lucene.Net.Documents
+{
+       
+       /// <summary>Simple utility class providing static methods to
+       /// compress and decompress binary data for stored fields.
+       /// This class compresses with the DEFLATE format (via SharpZipLib in
+       /// this port, java.util.zip.Deflater and Inflater in the Java
+       /// original), which is the same format previously used by the now
+       /// deprecated Field.Store.COMPRESS.
+       /// </summary>
+       
+       public class CompressionTools
+       {
+               
+               // Export only static methods
+               private CompressionTools()
+               {
+               }
+               
+               /// <summary>Compresses the specified byte range using the
+               /// specified compressionLevel (constants are defined in
+               /// java.util.zip.Deflater). 
+               /// </summary>
+               public static byte[] Compress(byte[] value_Renamed, int offset, int length, int compressionLevel)
+               {
+                       /* Create an expandable byte array to hold the compressed data.
+                       * You cannot use an array that's the same size as the original because
+                       * there is no guarantee that the compressed data will be smaller than
+                       * the uncompressed data. */
+                       System.IO.MemoryStream bos = new System.IO.MemoryStream(length);
+
+            SupportClass.SharpZipLib.Deflater compressor = SupportClass.SharpZipLib.CreateDeflater();
+                       
+                       try
+                       {
+                               compressor.SetLevel(compressionLevel);
+                               compressor.SetInput(value_Renamed, offset, length);
+                               compressor.Finish();
+                               
+                               // Compress the data
+                               byte[] buf = new byte[1024];
+                               while (!compressor.IsFinished)
+                               {
+                                       int count = compressor.Deflate(buf);
+                                       bos.Write(buf, 0, count);
+                               }
+                       }
+                       finally
+                       {
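+                               // The Java original calls compressor.end() here; SharpZipLib's
+                               // Deflater is fully managed and needs no explicit cleanup.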
+                       }
+                       
+                       return bos.ToArray();
+               }
+               
+               /// <summary>Compresses the specified byte range, with default BEST_COMPRESSION level </summary>
+               public static byte[] Compress(byte[] value_Renamed, int offset, int length)
+        {
+                       return Compress(value_Renamed, offset, length, SupportClass.SharpZipLib.Deflater.BEST_COMPRESSION);
+               }
+               
+               /// <summary>Compresses all bytes in the array, with default BEST_COMPRESSION level </summary>
+               public static byte[] Compress(byte[] value_Renamed)
+               {
+            return Compress(value_Renamed, 0, value_Renamed.Length, SupportClass.SharpZipLib.Deflater.BEST_COMPRESSION);
+               }
+               
+               /// <summary>Compresses the String value, with default BEST_COMPRESSION level </summary>
+               public static byte[] CompressString(System.String value_Renamed)
+               {
+            return CompressString(value_Renamed, SupportClass.SharpZipLib.Deflater.BEST_COMPRESSION);
+               }
+               
+               /// <summary>Compresses the String value using the specified
+               /// compressionLevel (constants are defined in
+               /// java.util.zip.Deflater). 
+               /// </summary>
+               public static byte[] CompressString(System.String value_Renamed, int compressionLevel)
+               {
+                       UnicodeUtil.UTF8Result result = new UnicodeUtil.UTF8Result();
+                       UnicodeUtil.UTF16toUTF8(value_Renamed, 0, value_Renamed.Length, result);
+                       return Compress(result.result, 0, result.length, compressionLevel);
+               }
+               
+               /// <summary>Decompress the byte array previously returned by
+               /// compress 
+               /// </summary>
+               public static byte[] Decompress(byte[] value_Renamed)
+               {
+                       // Create an expandable byte array to hold the decompressed data
+                       System.IO.MemoryStream bos = new System.IO.MemoryStream(value_Renamed.Length);
+                       
+                       SupportClass.SharpZipLib.Inflater decompressor = SupportClass.SharpZipLib.CreateInflater();
+                       
+                       try
+                       {
+                               decompressor.SetInput(value_Renamed);
+                               
+                               // Decompress the data
+                               byte[] buf = new byte[1024];
+                               while (!decompressor.IsFinished)
+                               {
+                                       int count = decompressor.Inflate(buf);
+                                       bos.Write(buf, 0, count);
+                               }
+                       }
+                       finally
+                       {
+                       }
+                       
+                       return bos.ToArray();
+               }
+               
+               /// <summary>Decompress the byte array previously returned by
+               /// compressString back into a String 
+               /// </summary>
+               public static System.String DecompressString(byte[] value_Renamed)
+               {
+                       UnicodeUtil.UTF16Result result = new UnicodeUtil.UTF16Result();
+                       byte[] bytes = Decompress(value_Renamed);
+                       UnicodeUtil.UTF8toUTF16(bytes, 0, bytes.Length, result);
+                       return new System.String(result.result, 0, result.length);
+               }
+       }
+}
+
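
A minimal round-trip sketch of the helpers above (it assumes the build defines SHARP_ZIP_LIB and references SharpZipLib, as the header comment notes; otherwise SupportClass.SharpZipLib has no backing implementation):

    using System;
    using Mono.Lucene.Net.Documents;

    class CompressionDemo
    {
        static void Main()
        {
            string original = "a long stored-field value worth compressing";

            // Compress with the default BEST_COMPRESSION level...
            byte[] packed = CompressionTools.CompressString(original);

            // ...and restore it; the bytes would normally live in a
            // binary stored field rather than being decompressed in place.
            string restored = CompressionTools.DecompressString(packed);
            Console.WriteLine(restored == original); // True
        }
    }
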
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/DateField.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/DateField.cs
new file mode 100644 (file)
index 0000000..64b538a
--- /dev/null
@@ -0,0 +1,138 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NumericUtils = Mono.Lucene.Net.Util.NumericUtils;
+using NumericRangeQuery = Mono.Lucene.Net.Search.NumericRangeQuery;
+using PrefixQuery = Mono.Lucene.Net.Search.PrefixQuery;
+using TermRangeQuery = Mono.Lucene.Net.Search.TermRangeQuery;
+// for javadoc
+
+namespace Mono.Lucene.Net.Documents
+{
+       // for javadoc
+       
+       // do not remove in 3.0, needed for reading old indexes!
+       
+       /// <summary> Provides support for converting dates to strings and vice-versa.
+       /// The strings are structured so that lexicographic sorting orders them by date,
+       /// which makes them suitable for use as field values and search terms.
+       /// 
+       /// <p/>Note that this class saves dates with millisecond granularity,
+       /// which is bad for {@link TermRangeQuery} and {@link PrefixQuery}, as those
+       /// queries are expanded to a BooleanQuery with a potentially large number
+       /// of terms when searching. Thus you might want to use
+       /// {@link DateTools} instead.
+       /// 
+       /// <p/>
+       /// Note: dates before 1970 cannot be used, and therefore cannot be
+       /// indexed when using this class. See {@link DateTools} for an
+       /// alternative without such a limitation.
+       /// 
+       /// <p/>
+       /// Another approach is {@link NumericUtils}, which provides
+       /// a sortable binary representation (prefix encoded) of numeric values, which
+       /// dates and times are.
+       /// For indexing a {@link Date} or {@link Calendar}, just get the unix timestamp as
+       /// <code>long</code> using {@link Date#getTime} or {@link Calendar#getTimeInMillis} and
+       /// index this as a numeric value with {@link NumericField}
+       /// and use {@link NumericRangeQuery} to query it.
+       /// 
+       /// </summary>
+       /// <deprecated> If you build a new index, use {@link DateTools} or 
+       /// {@link NumericField} instead.
+       /// This class is included for use with existing
+       /// indices and will be removed in a future release.
+       /// </deprecated>
+    [Obsolete("If you build a new index, use DateTools or NumericField instead. This class is included for use with existing indices and will be removed in a future release.")]
+       public class DateField
+       {
+               
+               private DateField()
+               {
+               }
+               
+               // make date strings long enough to last a millennium
+        private static int DATE_LEN = SupportClass.Number.ToString(1000L * 365 * 24 * 60 * 60 * 1000, SupportClass.Number.MAX_RADIX).Length;
+
+               public static System.String MIN_DATE_STRING()
+               {
+                       return TimeToString(0);
+               }
+               
+               public static System.String MAX_DATE_STRING()
+               {
+                       char[] buffer = new char[DATE_LEN];
+            char c = SupportClass.Character.ForDigit(SupportClass.Character.MAX_RADIX - 1, SupportClass.Character.MAX_RADIX);
+                       for (int i = 0; i < DATE_LEN; i++)
+                               buffer[i] = c;
+                       return new System.String(buffer);
+               }
+               
+               /// <summary> Converts a Date to a string suitable for indexing.</summary>
+               /// <throws>  RuntimeException if the date specified in the method argument is before 1970 </throws>
+        public static System.String DateToString(System.DateTime date)
+        {
+            TimeSpan ts = date.Subtract(new DateTime(1970, 1, 1));
+            ts = ts.Subtract(TimeZone.CurrentTimeZone.GetUtcOffset(date));
+            return TimeToString(ts.Ticks / TimeSpan.TicksPerMillisecond);
+        }
+               /// <summary> Converts a millisecond time to a string suitable for indexing.</summary>
+               /// <throws>  RuntimeException if the time specified in the method argument is negative, that is, before 1970 </throws>
+               public static System.String TimeToString(long time)
+               {
+                       if (time < 0)
+                               throw new System.SystemException("time '" + time + "' is too early, must be >= 0");
+
+            System.String s = SupportClass.Number.ToString(time, SupportClass.Character.MAX_RADIX);
+                       
+                       if (s.Length > DATE_LEN)
+                               throw new System.SystemException("time '" + time + "' is too late, length of string " + "representation must be <= " + DATE_LEN);
+                       
+                       // Pad with leading zeros
+                       if (s.Length < DATE_LEN)
+                       {
+                               System.Text.StringBuilder sb = new System.Text.StringBuilder(s);
+                               while (sb.Length < DATE_LEN)
+                                       sb.Insert(0, 0);
+                               s = sb.ToString();
+                       }
+                       
+                       return s;
+               }
+               
+               /// <summary>Converts a string-encoded date into a millisecond time. </summary>
+               public static long StringToTime(System.String s)
+               {
+            return SupportClass.Number.Parse(s, SupportClass.Number.MAX_RADIX);
+               }
+               /// <summary>Converts a string-encoded date into a Date object. </summary>
+        public static System.DateTime StringToDate(System.String s)
+        {
+            long ticks = StringToTime(s) * TimeSpan.TicksPerMillisecond;
+            System.DateTime date = new System.DateTime(1970, 1, 1);
+            date = date.AddTicks(ticks);
+            date = date.Add(TimeZone.CurrentTimeZone.GetUtcOffset(date));
+            return date;
+        }
+       }
+}
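
The encoding is easiest to see in a short sketch. DateField is marked obsolete above, so this is illustration of the legacy format only, not a recommendation:

    using System;
    using Mono.Lucene.Net.Documents;

    class DateFieldDemo
    {
        static void Main()
        {
            // The zero-padded, maximum-radix strings sort lexicographically
            // in time order, which is what makes them usable as index terms.
            string a = DateField.TimeToString(1000);
            string b = DateField.TimeToString(2000);
            Console.WriteLine(String.CompareOrdinal(a, b) < 0); // True

            // Decoding restores the original millisecond value.
            Console.WriteLine(DateField.StringToTime(a) == 1000); // True
        }
    }
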
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/DateTools.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/DateTools.cs
new file mode 100644 (file)
index 0000000..8d63243
--- /dev/null
@@ -0,0 +1,350 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NumericUtils = Mono.Lucene.Net.Util.NumericUtils;
+using NumericRangeQuery = Mono.Lucene.Net.Search.NumericRangeQuery;
+
+namespace Mono.Lucene.Net.Documents
+{
+       
+       /// <summary> Provides support for converting dates to strings and vice-versa.
+       /// The strings are structured so that lexicographic sorting orders 
+       /// them by date, which makes them suitable for use as field values 
+       /// and search terms.
+       /// 
+       /// <p/>This class also helps you to limit the resolution of your dates. Do not
+       /// save dates with a finer resolution than you really need, as then
+       /// RangeQuery and PrefixQuery will require more memory and become slower.
+       /// 
+       /// <p/>Compared to {@link DateField} the strings generated by the methods
+       /// in this class take slightly more space, unless your selected resolution
+       /// is set to <code>Resolution.DAY</code> or lower.
+       /// 
+       /// <p/>
+       /// Another approach is {@link NumericUtils}, which provides
+       /// a sortable binary representation (prefix encoded) of numeric values, which
+       /// dates and times are.
+       /// For indexing a {@link Date} or {@link Calendar}, just get the unix timestamp as
+       /// <code>long</code> using {@link Date#getTime} or {@link Calendar#getTimeInMillis} and
+       /// index this as a numeric value with {@link NumericField}
+       /// and use {@link NumericRangeQuery} to query it.
+       /// </summary>
+       public class DateTools
+       {
+               
+        private static readonly System.String YEAR_FORMAT = "yyyy";
+        private static readonly System.String MONTH_FORMAT = "yyyyMM";
+        private static readonly System.String DAY_FORMAT = "yyyyMMdd";
+        private static readonly System.String HOUR_FORMAT = "yyyyMMddHH";
+        private static readonly System.String MINUTE_FORMAT = "yyyyMMddHHmm";
+        private static readonly System.String SECOND_FORMAT = "yyyyMMddHHmmss";
+        private static readonly System.String MILLISECOND_FORMAT = "yyyyMMddHHmmssfff";
+               
+               private static readonly System.Globalization.Calendar calInstance = new System.Globalization.GregorianCalendar();
+               
+               // cannot create, the class has static methods only
+               private DateTools()
+               {
+               }
+               
+               /// <summary> Converts a Date to a string suitable for indexing.
+               /// 
+               /// </summary>
+               /// <param name="date">the date to be converted
+               /// </param>
+               /// <param name="resolution">the desired resolution, see
+               /// {@link #Round(Date, DateTools.Resolution)}
+               /// </param>
+               /// <returns> a string in format <code>yyyyMMddHHmmssfff</code> or shorter,
+               /// depending on <code>resolution</code>; using GMT as timezone 
+               /// </returns>
+               public static System.String DateToString(System.DateTime date, Resolution resolution)
+               {
+                       return TimeToString(date.Ticks / TimeSpan.TicksPerMillisecond, resolution);
+               }
+               
+               /// <summary> Converts a millisecond time to a string suitable for indexing.
+               /// 
+               /// </summary>
+               /// <param name="time">the date expressed as milliseconds (in this port computed as
+               /// <code>DateTime.Ticks / TimeSpan.TicksPerMillisecond</code>, not as milliseconds since January 1, 1970, 00:00:00 GMT)
+               /// </param>
+               /// <param name="resolution">the desired resolution, see
+               /// {@link #Round(long, DateTools.Resolution)}
+               /// </param>
+               /// <returns> a string in format <code>yyyyMMddHHmmssfff</code> or shorter,
+               /// depending on <code>resolution</code>; using GMT as timezone
+               /// </returns>
+               public static System.String TimeToString(long time, Resolution resolution)
+               {
+            System.DateTime date = new System.DateTime(Round(time, resolution));
+                       
+                       if (resolution == Resolution.YEAR)
+                       {
+                               return date.ToString(YEAR_FORMAT);
+                       }
+                       else if (resolution == Resolution.MONTH)
+                       {
+                               return date.ToString(MONTH_FORMAT);
+                       }
+                       else if (resolution == Resolution.DAY)
+                       {
+                               return date.ToString(DAY_FORMAT);
+                       }
+                       else if (resolution == Resolution.HOUR)
+                       {
+                               return date.ToString(HOUR_FORMAT);
+                       }
+                       else if (resolution == Resolution.MINUTE)
+                       {
+                               return date.ToString(MINUTE_FORMAT);
+                       }
+                       else if (resolution == Resolution.SECOND)
+                       {
+                               return date.ToString(SECOND_FORMAT);
+                       }
+                       else if (resolution == Resolution.MILLISECOND)
+                       {
+                               return date.ToString(MILLISECOND_FORMAT);
+                       }
+                       
+                       throw new System.ArgumentException("unknown resolution " + resolution);
+               }
+               
+               /// <summary> Converts a string produced by <code>TimeToString</code> or
+               /// <code>DateToString</code> back to a time. Note that in this port the
+               /// returned value is <code>DateTime.Ticks</code>, not the number of
+               /// milliseconds since January 1, 1970, 00:00:00 GMT of the Java original.
+               /// </summary>
+               /// <param name="dateString">the date string to be converted
+               /// </param>
+               /// <returns> the parsed time, expressed as <code>DateTime.Ticks</code>
+               /// </returns>
+               /// <throws>  ParseException if <code>dateString</code> is not in the expected format </throws>
+               public static long StringToTime(System.String dateString)
+               {
+                       return StringToDate(dateString).Ticks;
+               }
+               
+               /// <summary> Converts a string produced by <code>TimeToString</code> or
+               /// <code>DateToString</code> back to a time, represented as a
+               /// Date object.
+               /// 
+               /// </summary>
+               /// <param name="dateString">the date string to be converted
+               /// </param>
+               /// <returns> the parsed time as a Date object 
+               /// </returns>
+               /// <throws>  ParseException if <code>dateString</code> is not in the expected format </throws>
+               public static System.DateTime StringToDate(System.String dateString)
+               {
+            System.DateTime date;
+            if (dateString.Length == 4)
+            {
+                date = new System.DateTime(Convert.ToInt16(dateString.Substring(0, 4)),
+                    1, 1, 0, 0, 0, 0);
+            }
+            else if (dateString.Length == 6)
+            {
+                date = new System.DateTime(Convert.ToInt16(dateString.Substring(0, 4)),
+                    Convert.ToInt16(dateString.Substring(4, 2)),
+                    1, 0, 0, 0, 0);
+            }
+            else if (dateString.Length == 8)
+            {
+                date = new System.DateTime(Convert.ToInt16(dateString.Substring(0, 4)),
+                    Convert.ToInt16(dateString.Substring(4, 2)),
+                    Convert.ToInt16(dateString.Substring(6, 2)),
+                    0, 0, 0, 0);
+            }
+            else if (dateString.Length == 10)
+            {
+                date = new System.DateTime(Convert.ToInt16(dateString.Substring(0, 4)),
+                    Convert.ToInt16(dateString.Substring(4, 2)),
+                    Convert.ToInt16(dateString.Substring(6, 2)),
+                    Convert.ToInt16(dateString.Substring(8, 2)),
+                    0, 0, 0);
+            }
+            else if (dateString.Length == 12)
+            {
+                date = new System.DateTime(Convert.ToInt16(dateString.Substring(0, 4)),
+                    Convert.ToInt16(dateString.Substring(4, 2)),
+                    Convert.ToInt16(dateString.Substring(6, 2)),
+                    Convert.ToInt16(dateString.Substring(8, 2)),
+                    Convert.ToInt16(dateString.Substring(10, 2)),
+                    0, 0);
+            }
+            else if (dateString.Length == 14)
+            {
+                date = new System.DateTime(Convert.ToInt16(dateString.Substring(0, 4)),
+                    Convert.ToInt16(dateString.Substring(4, 2)),
+                    Convert.ToInt16(dateString.Substring(6, 2)),
+                    Convert.ToInt16(dateString.Substring(8, 2)),
+                    Convert.ToInt16(dateString.Substring(10, 2)),
+                    Convert.ToInt16(dateString.Substring(12, 2)),
+                    0);
+            }
+            else if (dateString.Length == 17)
+            {
+                date = new System.DateTime(Convert.ToInt16(dateString.Substring(0, 4)),
+                    Convert.ToInt16(dateString.Substring(4, 2)),
+                    Convert.ToInt16(dateString.Substring(6, 2)),
+                    Convert.ToInt16(dateString.Substring(8, 2)),
+                    Convert.ToInt16(dateString.Substring(10, 2)),
+                    Convert.ToInt16(dateString.Substring(12, 2)),
+                    Convert.ToInt16(dateString.Substring(14, 3)));
+            }
+            else
+            {
+                throw new System.FormatException("Input is not valid date string: " + dateString);
+            }
+            return date;
+               }
+               
+               /// <summary> Limit a date's resolution. For example, the date <code>2004-09-21 13:50:11</code>
+               /// will be changed to <code>2004-09-01 00:00:00</code> when using
+               /// <code>Resolution.MONTH</code>. 
+               /// 
+               /// </summary>
+               /// <param name="resolution">The desired resolution of the date to be returned
+               /// </param>
+               /// <returns> the date with all values more precise than <code>resolution</code>
+               /// set to 0 or 1
+               /// </returns>
+               public static System.DateTime Round(System.DateTime date, Resolution resolution)
+               {
+                       return new System.DateTime(Round(date.Ticks / TimeSpan.TicksPerMillisecond, resolution));
+               }
+               
+               /// <summary> Limit a date's resolution. For example, the date <code>1095767411000</code>
+               /// (which represents 2004-09-21 13:50:11) will be changed to 
+               /// <code>1093989600000</code> (2004-09-01 00:00:00) when using
+               /// <code>Resolution.MONTH</code>.
+               /// 
+               /// </summary>
+               /// <param name="time">The time in milliseconds (not ticks).</param>
+               /// <param name="resolution">The desired resolution of the date to be returned
+               /// </param>
+               /// <returns> the date with all values more precise than <code>resolution</code>
+               /// set to 0 or 1; note that in this port the value is returned as ticks
+               /// (<code>DateTime.Ticks</code>), not as milliseconds since January 1, 1970, 00:00:00 GMT
+               /// </returns>
+               public static long Round(long time, Resolution resolution)
+               {
+                       System.DateTime dt = new System.DateTime(time * TimeSpan.TicksPerMillisecond);
+                       
+                       if (resolution == Resolution.YEAR)
+                       {
+                dt = dt.AddMonths(1 - dt.Month);
+                dt = dt.AddDays(1 - dt.Day);
+                dt = dt.AddHours(0 - dt.Hour);
+                dt = dt.AddMinutes(0 - dt.Minute);
+                dt = dt.AddSeconds(0 - dt.Second);
+                dt = dt.AddMilliseconds(0 - dt.Millisecond);
+            }
+                       else if (resolution == Resolution.MONTH)
+                       {
+                dt = dt.AddDays(1 - dt.Day);
+                dt = dt.AddHours(0 - dt.Hour);
+                dt = dt.AddMinutes(0 - dt.Minute);
+                dt = dt.AddSeconds(0 - dt.Second);
+                dt = dt.AddMilliseconds(0 - dt.Millisecond);
+            }
+                       else if (resolution == Resolution.DAY)
+                       {
+                dt = dt.AddHours(0 - dt.Hour);
+                dt = dt.AddMinutes(0 - dt.Minute);
+                dt = dt.AddSeconds(0 - dt.Second);
+                dt = dt.AddMilliseconds(0 - dt.Millisecond);
+            }
+                       else if (resolution == Resolution.HOUR)
+                       {
+                dt = dt.AddMinutes(0 - dt.Minute);
+                dt = dt.AddSeconds(0 - dt.Second);
+                dt = dt.AddMilliseconds(0 - dt.Millisecond);
+            }
+                       else if (resolution == Resolution.MINUTE)
+                       {
+                dt = dt.AddSeconds(0 - dt.Second);
+                dt = dt.AddMilliseconds(0 - dt.Millisecond);
+            }
+                       else if (resolution == Resolution.SECOND)
+                       {
+                dt = dt.AddMilliseconds(0 - dt.Millisecond);
+            }
+                       else if (resolution == Resolution.MILLISECOND)
+                       {
+                               // don't cut off anything
+                       }
+                       else
+                       {
+                               throw new System.ArgumentException("unknown resolution " + resolution);
+                       }
+                       return dt.Ticks;
+               }
+               
+               /// <summary>Specifies the time granularity. </summary>
+               public class Resolution
+               {
+                       
+                       public static readonly Resolution YEAR = new Resolution("year");
+                       public static readonly Resolution MONTH = new Resolution("month");
+                       public static readonly Resolution DAY = new Resolution("day");
+                       public static readonly Resolution HOUR = new Resolution("hour");
+                       public static readonly Resolution MINUTE = new Resolution("minute");
+                       public static readonly Resolution SECOND = new Resolution("second");
+                       public static readonly Resolution MILLISECOND = new Resolution("millisecond");
+                       
+                       private System.String resolution;
+                       
+                       internal Resolution()
+                       {
+                       }
+                       
+                       internal Resolution(System.String resolution)
+                       {
+                               this.resolution = resolution;
+                       }
+                       
+                       public override System.String ToString()
+                       {
+                               return resolution;
+                       }
+               }
+               static DateTools()
+               {
+                       {
+                               // times need to be normalized so the value doesn't depend on the 
+                               // location the index is created/used:
+                // {{Aroush-2.1}}
+                /*
+                               YEAR_FORMAT.setTimeZone(GMT);
+                               MONTH_FORMAT.setTimeZone(GMT);
+                               DAY_FORMAT.setTimeZone(GMT);
+                               HOUR_FORMAT.setTimeZone(GMT);
+                               MINUTE_FORMAT.setTimeZone(GMT);
+                               SECOND_FORMAT.setTimeZone(GMT);
+                               MILLISECOND_FORMAT.setTimeZone(GMT);
+                */
+                       }
+               }
+       }
+}
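
A small sketch of the intended round trip — encoding an index term at day resolution, then parsing it back (the truncation behavior follows Round above):

    using System;
    using Mono.Lucene.Net.Documents;

    class DateToolsDemo
    {
        static void Main()
        {
            DateTime when = new DateTime(2004, 9, 21, 13, 50, 11);

            // Day resolution keeps range/prefix queries over dates cheap.
            string term = DateTools.DateToString(when, DateTools.Resolution.DAY);
            Console.WriteLine(term); // "20040921"

            // Parsing restores the date, truncated to the chosen resolution.
            DateTime parsed = DateTools.StringToDate(term);
            Console.WriteLine(parsed); // 2004-09-21 00:00:00
        }
    }
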
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/Document.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/Document.cs
new file mode 100644 (file)
index 0000000..4a022d9
--- /dev/null
@@ -0,0 +1,439 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+// for javadoc
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using ScoreDoc = Mono.Lucene.Net.Search.ScoreDoc;
+using Searcher = Mono.Lucene.Net.Search.Searcher;
+
+namespace Mono.Lucene.Net.Documents
+{
+       
+       /// <summary>Documents are the unit of indexing and search.
+       /// 
+       /// A Document is a set of fields.  Each field has a name and a textual value.
+       /// A field may be {@link Fieldable#IsStored() stored} with the document, in which
+       /// case it is returned with search hits on the document.  Thus each document
+       /// should typically contain one or more stored fields which uniquely identify
+       /// it.
+       /// 
+       /// <p/>Note that fields which are <i>not</i> {@link Fieldable#IsStored() stored} are
+       /// <i>not</i> available in documents retrieved from the index, e.g. with {@link
+       /// ScoreDoc#doc}, {@link Searcher#Doc(int)} or {@link
+       /// IndexReader#Document(int)}.
+       /// </summary>
+       
+       [Serializable]
+       public sealed class Document
+       {
+               private class AnonymousClassEnumeration : System.Collections.IEnumerator
+               {
+                       public AnonymousClassEnumeration(Document enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(Document enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                               iter = Enclosing_Instance.fields.GetEnumerator();
+                       }
+                       private System.Object tempAuxObj;
+                       public bool MoveNext()
+                       {
+                               bool result = HasMoreElements();
+                               if (result)
+                               {
+                                       tempAuxObj = NextElement();
+                               }
+                               return result;
+                       }
+                       public void  Reset()
+                       {
+                               tempAuxObj = null;
+                       }
+                       public System.Object Current
+                       {
+                               get
+                               {
+                                       return tempAuxObj;
+                               }
+                               
+                       }
+                       private Document enclosingInstance;
+                       public Document Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal System.Collections.IEnumerator iter;
+                       public bool HasMoreElements()
+                       {
+                               return iter.MoveNext();
+                       }
+                       public System.Object NextElement()
+                       {
+                               return iter.Current;
+                       }
+               }
+               internal System.Collections.IList fields = new System.Collections.ArrayList();
+               private float boost = 1.0f;
+               
+               /// <summary>Constructs a new document with no fields. </summary>
+               public Document()
+               {
+               }
+               
+               
+               /// <summary>Sets a boost factor for hits on any field of this document.  This value
+               /// will be multiplied into the score of all hits on this document.
+               /// 
+               /// <p/>The default value is 1.0.
+               /// 
+               /// <p/>Values are multiplied into the value of {@link Fieldable#GetBoost()} of
+               /// each field in this document.  Thus, this method in effect sets a default
+               /// boost for the fields of this document.
+               /// 
+               /// </summary>
+               /// <seealso cref="Fieldable.SetBoost(float)">
+               /// </seealso>
+               public void  SetBoost(float boost)
+               {
+                       this.boost = boost;
+               }
+               
+               /// <summary>Returns, at indexing time, the boost factor as set by {@link #SetBoost(float)}. 
+               /// 
+               /// <p/>Note that once a document is indexed this value is no longer available
+               /// from the index.  At search time, for retrieved documents, this method always 
+               /// returns 1. This however does not mean that the boost value set at  indexing 
+               /// time was ignored - it was just combined with other indexing time factors and 
+               /// stored elsewhere, for better indexing and search performance. (For more 
+               /// information see the "norm(t,d)" part of the scoring formula in 
+               /// {@link Mono.Lucene.Net.Search.Similarity Similarity}.)
+               /// 
+               /// </summary>
+               /// <seealso cref="SetBoost(float)">
+               /// </seealso>
+               public float GetBoost()
+               {
+                       return boost;
+               }
+               
+               /// <summary> <p/>Adds a field to a document.  Several fields may be added with
+               /// the same name.  In this case, if the fields are indexed, their text is
+               /// treated as though appended for the purposes of search.<p/>
+               /// <p/> Note that add, like the removeField(s) methods, only makes sense 
+               /// prior to adding a document to an index. These methods cannot
+               /// be used to change the content of an existing index! In order to achieve this,
+               /// a document has to be deleted from an index and a new changed version of that
+               /// document has to be added.<p/>
+               /// </summary>
+               public void  Add(Fieldable field)
+               {
+                       fields.Add(field);
+               }
+               
+               /// <summary> <p/>Removes field with the specified name from the document.
+               /// If multiple fields exist with this name, this method removes the first field that has been added.
+               /// If there is no field with the specified name, the document remains unchanged.<p/>
+               /// <p/> Note that the removeField(s) methods, like the add method, only make sense 
+               /// prior to adding a document to an index. These methods cannot
+               /// be used to change the content of an existing index! In order to achieve this,
+               /// a document has to be deleted from an index and a new changed version of that
+               /// document has to be added.<p/>
+               /// </summary>
+               public void  RemoveField(System.String name)
+               {
+                       System.Collections.IEnumerator it = fields.GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               Fieldable field = (Fieldable) it.Current;
+                               if (field.Name().Equals(name))
+                               {
+                    fields.Remove(field);
+                                       return ;
+                               }
+                       }
+               }
+               
+               /// <summary> <p/>Removes all fields with the given name from the document.
+               /// If there is no field with the specified name, the document remains unchanged.<p/>
+               /// <p/> Note that the removeField(s) methods, like the add method, only make sense 
+               /// prior to adding a document to an index. These methods cannot
+               /// be used to change the content of an existing index! In order to achieve this,
+               /// a document has to be deleted from an index and a new changed version of that
+               /// document has to be added.<p/>
+               /// </summary>
+               public void  RemoveFields(System.String name)
+               {
+            for (int i = fields.Count - 1; i >= 0; i--)
+            {
+                Fieldable field = (Fieldable) fields[i];
+                if (field.Name().Equals(name))
+                {
+                    fields.RemoveAt(i);
+                }
+            }
+               }
+               
+               /// <summary>Returns a field with the given name if any exist in this document, or
+               /// null.  If multiple fields exists with this name, this method returns the
+               /// first value added.
+               /// Do not use this method with lazy loaded fields.
+               /// </summary>
+               public Field GetField(System.String name)
+               {
+                       for (int i = 0; i < fields.Count; i++)
+                       {
+                               Field field = (Field) fields[i];
+                               if (field.Name().Equals(name))
+                                       return field;
+                       }
+                       return null;
+               }
+               
+               
+               /// <summary>Returns a field with the given name if any exist in this document, or
+               /// null.  If multiple fields exists with this name, this method returns the
+               /// first value added.
+               /// </summary>
+               public Fieldable GetFieldable(System.String name)
+               {
+                       for (int i = 0; i < fields.Count; i++)
+                       {
+                               Fieldable field = (Fieldable) fields[i];
+                               if (field.Name().Equals(name))
+                                       return field;
+                       }
+                       return null;
+               }
+               
+               /// <summary>Returns the string value of the field with the given name if any exist in
+               /// this document, or null.  If multiple fields exist with this name, this
+               /// method returns the first value added. If only binary fields with this name
+               /// exist, returns null.
+               /// </summary>
+               public System.String Get(System.String name)
+               {
+                       for (int i = 0; i < fields.Count; i++)
+                       {
+                               Fieldable field = (Fieldable) fields[i];
+                               if (field.Name().Equals(name) && (!field.IsBinary()))
+                                       return field.StringValue();
+                       }
+                       return null;
+               }
+               
+               /// <summary>Returns an Enumeration of all the fields in a document.</summary>
+               /// <deprecated> use {@link #GetFields()} instead
+               /// </deprecated>
+        [Obsolete("Use GetFields() instead")]
+               public System.Collections.IEnumerator Fields()
+               {
+                       return new AnonymousClassEnumeration(this);
+               }
+               
+               /// <summary>Returns a List of all the fields in a document.
+               /// <p/>Note that fields which are <i>not</i> {@link Fieldable#IsStored() stored} are
+               /// <i>not</i> available in documents retrieved from the
+               /// index, e.g. {@link Searcher#Doc(int)} or {@link
+               /// IndexReader#Document(int)}.
+               /// </summary>
+               public System.Collections.IList GetFields()
+               {
+                       return fields;
+               }
+               
+               private static readonly Field[] NO_FIELDS = new Field[0];
+               
+               /// <summary> Returns an array of {@link Field}s with the given name.
+               /// Do not use with lazy loaded fields.
+               /// This method returns an empty array when there are no
+               /// matching fields.  It never returns null.
+               /// 
+               /// </summary>
+               /// <param name="name">the name of the field
+               /// </param>
+               /// <returns> a <code>Field[]</code> array
+               /// </returns>
+               public Field[] GetFields(System.String name)
+               {
+                       System.Collections.ArrayList result = new System.Collections.ArrayList();
+                       for (int i = 0; i < fields.Count; i++)
+                       {
+                               Field field = (Field) fields[i];
+                               if (field.Name().Equals(name))
+                               {
+                                       result.Add(field);
+                               }
+                       }
+                       
+                       if (result.Count == 0)
+                               return NO_FIELDS;
+                       
+                       return (Field[]) result.ToArray(typeof(Field));
+               }
+               
+               
+               private static readonly Fieldable[] NO_FIELDABLES = new Fieldable[0];
+               
+               /// <summary> Returns an array of {@link Fieldable}s with the given name.
+               /// This method returns an empty array when there are no
+               /// matching fields.  It never returns null.
+               /// 
+               /// </summary>
+               /// <param name="name">the name of the field
+               /// </param>
+               /// <returns> a <code>Fieldable[]</code> array
+               /// </returns>
+               public Fieldable[] GetFieldables(System.String name)
+               {
+                       System.Collections.ArrayList result = new System.Collections.ArrayList();
+                       for (int i = 0; i < fields.Count; i++)
+                       {
+                               Fieldable field = (Fieldable) fields[i];
+                               if (field.Name().Equals(name))
+                               {
+                                       result.Add(field);
+                               }
+                       }
+                       
+                       if (result.Count == 0)
+                               return NO_FIELDABLES;
+                       
+                       return (Fieldable[]) result.ToArray(typeof(Fieldable));
+               }
+               
+               
+               private static readonly System.String[] NO_STRINGS = new System.String[0];
+               
+               /// <summary> Returns an array of values of the field specified as the method parameter.
+               /// This method returns an empty array when there are no
+               /// matching fields.  It never returns null.
+               /// </summary>
+               /// <param name="name">the name of the field
+               /// </param>
+               /// <returns> a <code>String[]</code> of field values
+               /// </returns>
+               public System.String[] GetValues(System.String name)
+               {
+                       System.Collections.ArrayList result = new System.Collections.ArrayList();
+                       for (int i = 0; i < fields.Count; i++)
+                       {
+                               Fieldable field = (Fieldable) fields[i];
+                               if (field.Name().Equals(name) && (!field.IsBinary()))
+                                       result.Add(field.StringValue());
+                       }
+                       
+                       if (result.Count == 0)
+                               return NO_STRINGS;
+                       
+                       return (System.String[]) result.ToArray(typeof(System.String));
+               }
+               
+               private static readonly byte[][] NO_BYTES = new byte[0][];
+               
+               /// <summary> Returns an array of byte arrays for all of the fields that have the name specified
+               /// as the method parameter.  This method returns an empty
+               /// array when there are no matching fields.  It never
+               /// returns null.
+               /// 
+               /// </summary>
+               /// <param name="name">the name of the field
+               /// </param>
+               /// <returns> a <code>byte[][]</code> of binary field values
+               /// </returns>
+               public byte[][] GetBinaryValues(System.String name)
+               {
+                       System.Collections.ArrayList result = new System.Collections.ArrayList();
+                       for (int i = 0; i < fields.Count; i++)
+                       {
+                               Fieldable field = (Fieldable) fields[i];
+                               if (field.Name().Equals(name) && (field.IsBinary()))
+                                       result.Add(field.BinaryValue());
+                       }
+                       
+                       if (result.Count == 0)
+                               return NO_BYTES;
+                       
+                       // result was built above as an ArrayList of byte[], so a typed
+                       // ToArray is all the conversion this needs.
+                       return (byte[][]) result.ToArray(typeof(byte[]));
+               }
+               
+               /// <summary> Returns an array of bytes for the first (or only) field that has the name
+               /// specified as the method parameter. This method will return <code>null</code>
+               /// if no binary fields with the specified name are available.
+               /// There may be non-binary fields with the same name.
+               /// 
+               /// </summary>
+               /// <param name="name">the name of the field.
+               /// </param>
+               /// <returns> a <code>byte[]</code> containing the binary field value or <code>null</code>
+               /// </returns>
+               public byte[] GetBinaryValue(System.String name)
+               {
+                       for (int i = 0; i < fields.Count; i++)
+                       {
+                               Fieldable field = (Fieldable) fields[i];
+                               if (field.Name().Equals(name) && (field.IsBinary()))
+                                       return field.BinaryValue();
+                       }
+                       return null;
+               }
+               
+               /// <summary>Prints the fields of a document for human consumption. </summary>
+               public override System.String ToString()
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       buffer.Append("Document<");
+                       for (int i = 0; i < fields.Count; i++)
+                       {
+                               Fieldable field = (Fieldable) fields[i];
+                               buffer.Append(field.ToString());
+                               if (i != fields.Count - 1)
+                                       buffer.Append(" ");
+                       }
+                       buffer.Append(">");
+                       return buffer.ToString();
+               }
+
+        public System.Collections.IList fields_ForNUnit
+        {
+            get { return fields; }
+        }
+       }
+}
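
As a usage sketch of the accessors above (the Field constructor used here comes from Field.cs below): several fields may share a name, and the accessors differ in which of them they return:

    using System;
    using Mono.Lucene.Net.Documents;

    class DocumentDemo
    {
        static void Main()
        {
            Document doc = new Document();
            doc.Add(new Field("title", "Monkeydoc merge", Field.Store.YES, Field.Index.ANALYZED));
            doc.Add(new Field("tag", "docs", Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new Field("tag", "mono", Field.Store.YES, Field.Index.NOT_ANALYZED));

            // Get() returns only the first non-binary value for a name...
            Console.WriteLine(doc.Get("tag")); // "docs"

            // ...while GetValues() returns all of them, in insertion order.
            foreach (string tag in doc.GetValues("tag"))
                Console.WriteLine(tag); // "docs", then "mono"
        }
    }
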
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/Field.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/Field.cs
new file mode 100644 (file)
index 0000000..5e47004
--- /dev/null
@@ -0,0 +1,648 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TokenStream = Mono.Lucene.Net.Analysis.TokenStream;
+using IndexWriter = Mono.Lucene.Net.Index.IndexWriter;
+using Parameter = Mono.Lucene.Net.Util.Parameter;
+using StringHelper = Mono.Lucene.Net.Util.StringHelper;
+
+namespace Mono.Lucene.Net.Documents
+{
+       
+       /// <summary>A field is a section of a Document.  Each field has two parts, a name and a
+       /// value.  Values may be free text, provided as a String or as a Reader, or they
+       /// may be atomic keywords, which are not further processed.  Such keywords may
+       /// be used to represent dates, urls, etc.  Fields are optionally stored in the
+       /// index, so that they may be returned with hits on the document.
+       /// </summary>
+       
+       [Serializable]
+       public sealed class Field:AbstractField, Fieldable
+       {
+               
+               /// <summary>Specifies whether and how a field should be stored. </summary>
+               [Serializable]
+               public sealed class Store:Parameter
+               {
+                       
+                       internal Store(System.String name):base(name)
+                       {
+                       }
+                       
+                       /// <summary>Store the original field value in the index in a compressed form. This is
+                       /// useful for long documents and for binary valued fields.
+                       /// </summary>
+                       /// <deprecated> Please use {@link CompressionTools} instead.
+                       /// For string fields that were previously indexed and stored using compression,
+                       /// the new way to achieve this is: first add the field indexed-only (no store),
+                       /// and additionally add a binary, stored field with the same field name,
+                       /// compressing the value with {@link CompressionTools#compressString}.
+                       /// </deprecated>
+                       public static readonly Store COMPRESS = new Store("COMPRESS");
+                       
+                       /// <summary>Store the original field value in the index. This is useful for short texts
+                       /// like a document's title which should be displayed with the results. The
+                       /// value is stored in its original form, i.e. no analyzer is used before it is
+                       /// stored.
+                       /// </summary>
+                       public static readonly Store YES = new Store("YES");
+                       
+                       /// <summary>Do not store the field value in the index. </summary>
+                       public static readonly Store NO = new Store("NO");
+               }
+               
+               /// <summary>Specifies whether and how a field should be indexed. </summary>
+               [Serializable]
+               public sealed class Index:Parameter
+               {
+                       
+                       internal Index(System.String name):base(name)
+                       {
+                       }
+                       
+                       /// <summary>Do not index the field value. This field can thus not be searched,
+                       /// but one can still access its contents provided it is
+                       /// {@link Field.Store stored}. 
+                       /// </summary>
+                       public static readonly Index NO = new Index("NO");
+                       
+                       /// <summary>Index the tokens produced by running the field's
+                       /// value through an Analyzer.  This is useful for
+                       /// common text. 
+                       /// </summary>
+                       public static readonly Index ANALYZED = new Index("ANALYZED");
+                       
+                       /// <deprecated> This has been renamed to {@link #ANALYZED} 
+                       /// </deprecated>
+            [Obsolete("this has been renamed to ANALYZED")]
+                       public static readonly Index TOKENIZED;
+                       
+                       /// <summary>Index the field's value without using an Analyzer, so it can be searched.
+                       /// As no analyzer is used the value will be stored as a single term. This is
+                       /// useful for unique Ids like product numbers.
+                       /// </summary>
+                       public static readonly Index NOT_ANALYZED = new Index("NOT_ANALYZED");
+                       
+                       /// <deprecated> This has been renamed to {@link #NOT_ANALYZED} 
+                       /// </deprecated>
+            [Obsolete("This has been renamed to NOT_ANALYZED")]
+                       public static readonly Index UN_TOKENIZED;
+                       
+                       /// <summary>Expert: Index the field's value without an Analyzer,
+                       /// and also disable the storing of norms.  Note that you
+                       /// can also separately enable/disable norms by calling
+                       /// {@link Field#setOmitNorms}.  No norms means that
+                       /// index-time field and document boosting and field
+                       /// length normalization are disabled.  The benefit is
+                       /// less memory usage as norms take up one byte of RAM
+                       /// per indexed field for every document in the index,
+                       /// during searching.  Note that once you index a given
+                       /// field <i>with</i> norms enabled, disabling norms will
+                       /// have no effect.  In other words, for this to have the
+                       /// above described effect on a field, all instances of
+                       /// that field must be indexed with NOT_ANALYZED_NO_NORMS
+                       /// from the beginning. 
+                       /// </summary>
+                       public static readonly Index NOT_ANALYZED_NO_NORMS = new Index("NOT_ANALYZED_NO_NORMS");
+                       
+                       /// <deprecated> This has been renamed to
+                       /// {@link #NOT_ANALYZED_NO_NORMS} 
+                       /// </deprecated>
+            [Obsolete("This has been renamed to NOT_ANALYZED_NO_NORMS")]
+                       public static readonly Index NO_NORMS;
+                       
+                       /// <summary>Expert: Index the tokens produced by running the
+                       /// field's value through an Analyzer, and also
+                       /// separately disable the storing of norms.  See
+                       /// {@link #NOT_ANALYZED_NO_NORMS} for what norms are
+                       /// and why you may want to disable them. 
+                       /// </summary>
+                       public static readonly Index ANALYZED_NO_NORMS = new Index("ANALYZED_NO_NORMS");
+                       static Index()
+                       {
+                               TOKENIZED = ANALYZED;
+                               UN_TOKENIZED = NOT_ANALYZED;
+                               NO_NORMS = NOT_ANALYZED_NO_NORMS;
+                       }
+               }
+               
+               /// <summary>Specifies whether and how a field should have term vectors. </summary>
+               [Serializable]
+               public sealed class TermVector:Parameter
+               {
+                       
+                       internal TermVector(System.String name):base(name)
+                       {
+                       }
+                       
+                       /// <summary>Do not store term vectors. </summary>
+                       public static readonly TermVector NO = new TermVector("NO");
+                       
+                       /// <summary>Store the term vectors of each document. A term vector is a list
+                       /// of the document's terms and their number of occurrences in that document. 
+                       /// </summary>
+                       public static readonly TermVector YES = new TermVector("YES");
+                       
+                       /// <summary> Store the term vector + token position information
+                       /// 
+                       /// </summary>
+                       /// <seealso cref="YES">
+                       /// </seealso>
+                       public static readonly TermVector WITH_POSITIONS = new TermVector("WITH_POSITIONS");
+                       
+                       /// <summary> Store the term vector + Token offset information
+                       /// 
+                       /// </summary>
+                       /// <seealso cref="YES">
+                       /// </seealso>
+                       public static readonly TermVector WITH_OFFSETS = new TermVector("WITH_OFFSETS");
+                       
+                       /// <summary> Store the term vector + Token position and offset information
+                       /// 
+                       /// </summary>
+                       /// <seealso cref="YES">
+                       /// </seealso>
+                       /// <seealso cref="WITH_POSITIONS">
+                       /// </seealso>
+                       /// <seealso cref="WITH_OFFSETS">
+                       /// </seealso>
+                       public static readonly TermVector WITH_POSITIONS_OFFSETS = new TermVector("WITH_POSITIONS_OFFSETS");
+               }
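+
+               // Illustrative sketch (editor's addition, not part of the original
+               // source): the Store, Index and TermVector parameters above are combined
+               // when constructing a Field; the names and values are hypothetical:
+               //
+               //     Field title = new Field("title", "Lucene in Action",
+               //                             Field.Store.YES, Field.Index.ANALYZED);
+               //     Field id = new Field("id", "doc-42", Field.Store.YES,
+               //                          Field.Index.NOT_ANALYZED, Field.TermVector.NO);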
+               
+               
+               /// <summary>The value of the field as a String, or null.  If null, the Reader value or
+               /// binary value is used.  Exactly one of stringValue(),
+               /// readerValue(), and getBinaryValue() must be set. 
+               /// </summary>
+               public override System.String StringValue()
+               {
+                       return fieldsData is System.String ? (System.String) fieldsData : null;
+               }
+               
+               /// <summary>The value of the field as a Reader, or null.  If null, the String value or
+               /// binary value is used.  Exactly one of stringValue(),
+               /// readerValue(), and getBinaryValue() must be set. 
+               /// </summary>
+               public override System.IO.TextReader ReaderValue()
+               {
+                       return fieldsData is System.IO.TextReader ? (System.IO.TextReader) fieldsData : null;
+               }
+               
+               /// <summary>The value of the field in Binary, or null.  If null, the Reader value,
+               /// or String value is used. Exactly one of stringValue(),
+               /// readerValue(), and getBinaryValue() must be set.
+               /// </summary>
+               /// <deprecated> This method must allocate a new byte[] if
+               /// the {@link AbstractField#GetBinaryOffset()} is non-zero
+               /// or {@link AbstractField#GetBinaryLength()} is not the
+               /// full length of the byte[]. Please use {@link
+               /// AbstractField#GetBinaryValue()} instead, which simply
+               /// returns the byte[].
+               /// </deprecated>
+        [Obsolete("This method must allocate a new byte[] if the AbstractField.GetBinaryOffset() is non-zero or AbstractField.GetBinaryLength() is not the full length of the byte[]. Please use AbstractField.GetBinaryValue() instead, which simply returns the byte[].")]
+               public override byte[] BinaryValue()
+               {
+                       if (!isBinary)
+                               return null;
+                       byte[] data = (byte[]) fieldsData;
+                       if (binaryOffset == 0 && data.Length == binaryLength)
+                               return data; //Optimization
+                       
+                       byte[] ret = new byte[binaryLength];
+                       Array.Copy(data, binaryOffset, ret, 0, binaryLength);
+                       return ret;
+               }
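+
+               // Editor's note (not part of the original source): per the deprecation
+               // message above, AbstractField.GetBinaryValue() instead returns the
+               // underlying byte[] directly, so callers pair it with the offset/length
+               // accessors rather than relying on a defensive copy:
+               //
+               //     byte[] raw = field.GetBinaryValue();
+               //     int off = field.GetBinaryOffset(), len = field.GetBinaryLength();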
+               
+               /// <summary>The TokenStream for this field to be used when indexing, or null.  If null, the Reader value
+               /// or String value is analyzed to produce the indexed tokens. 
+               /// </summary>
+               public override TokenStream TokenStreamValue()
+               {
+                       return tokenStream;
+               }
+               
+               
+               /// <summary><p/>Expert: change the value of this field.  This can
+               /// be used during indexing to re-use a single Field
+               /// instance to improve indexing speed by avoiding GC cost
+               /// of new'ing and reclaiming Field instances.  Typically
+               /// a single {@link Document} instance is re-used as
+               /// well.  This helps most on small documents.<p/>
+               /// 
+               /// <p/>Each Field instance should only be used once
+               /// within a single {@link Document} instance.  See <a
+               /// href="http://wiki.apache.org/lucene-java/ImproveIndexingSpeed">ImproveIndexingSpeed</a>
+               /// for details.<p/> 
+               /// </summary>
+               public void  SetValue(System.String value_Renamed)
+               {
+                       if (isBinary)
+                       {
+                               throw new System.ArgumentException("cannot set a String value on a binary field");
+                       }
+                       fieldsData = value_Renamed;
+               }
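+
+               // Illustrative sketch (editor's addition, not part of the original
+               // source) of the reuse pattern described above; writer and texts are
+               // assumed to exist:
+               //
+               //     Document doc = new Document();
+               //     Field body = new Field("body", "", Field.Store.NO, Field.Index.ANALYZED);
+               //     doc.Add(body);
+               //     foreach (string text in texts)
+               //     {
+               //             body.SetValue(text);     // mutate instead of allocating
+               //             writer.AddDocument(doc); // assumed IndexWriter
+               //     }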
+               
+               /// <summary>Expert: change the value of this field.  See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
+               public void  SetValue(System.IO.TextReader value_Renamed)
+               {
+                       if (isBinary)
+                       {
+                               throw new System.ArgumentException("cannot set a Reader value on a binary field");
+                       }
+                       if (isStored)
+                       {
+                               throw new System.ArgumentException("cannot set a Reader value on a stored field");
+                       }
+                       fieldsData = value_Renamed;
+               }
+               
+               /// <summary>Expert: change the value of this field.  See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
+               public void  SetValue(byte[] value_Renamed)
+               {
+                       if (!isBinary)
+                       {
+                               throw new System.ArgumentException("cannot set a byte[] value on a non-binary field");
+                       }
+                       fieldsData = value_Renamed;
+                       binaryLength = value_Renamed.Length;
+                       binaryOffset = 0;
+               }
+               
+               /// <summary>Expert: change the value of this field.  See <a href="#setValue(java.lang.String)">setValue(String)</a>. </summary>
+               public void  SetValue(byte[] value_Renamed, int offset, int length)
+               {
+                       if (!isBinary)
+                       {
+                               throw new System.ArgumentException("cannot set a byte[] value on a non-binary field");
+                       }
+                       fieldsData = value_Renamed;
+                       binaryLength = length;
+                       binaryOffset = offset;
+               }
+               
+               
+               /// <summary>Expert: change the value of this field.  See <a href="#setValue(java.lang.String)">setValue(String)</a>.</summary>
+               /// <deprecated> use {@link #setTokenStream} 
+               /// </deprecated>
+        [Obsolete("use SetTokenStream ")]
+               public void  SetValue(TokenStream value_Renamed)
+               {
+                       if (isBinary)
+                       {
+                               throw new System.ArgumentException("cannot set a TokenStream value on a binary field");
+                       }
+                       if (isStored)
+                       {
+                               throw new System.ArgumentException("cannot set a TokenStream value on a stored field");
+                       }
+                       fieldsData = null;
+                       tokenStream = value_Renamed;
+               }
+               
+               /// <summary>Expert: sets the token stream to be used for indexing and causes isIndexed() and isTokenized() to return true.
+               /// May be combined with stored values from stringValue() or binaryValue() 
+               /// </summary>
+               public void  SetTokenStream(TokenStream tokenStream)
+               {
+                       this.isIndexed = true;
+                       this.isTokenized = true;
+                       this.tokenStream = tokenStream;
+               }
+               
+               /// <summary> Create a field by specifying its name, value and how it will
+               /// be saved in the index. Term vectors will not be stored in the index.
+               /// 
+               /// </summary>
+               /// <param name="name">The name of the field
+               /// </param>
+               /// <param name="value">The string to process
+               /// </param>
+               /// <param name="store">Whether <code>value</code> should be stored in the index
+               /// </param>
+               /// <param name="index">Whether the field should be indexed, and if so, if it should
+               /// be tokenized before indexing 
+               /// </param>
+               /// <throws>  NullPointerException if name or value is <code>null</code> </throws>
+               /// <throws>  IllegalArgumentException if the field is neither stored nor indexed  </throws>
+               public Field(System.String name, System.String value_Renamed, Store store, Index index):this(name, value_Renamed, store, index, TermVector.NO)
+               {
+               }
+               
+               /// <summary> Create a field by specifying its name, value and how it will
+               /// be saved in the index.
+               /// 
+               /// </summary>
+               /// <param name="name">The name of the field
+               /// </param>
+               /// <param name="value">The string to process
+               /// </param>
+               /// <param name="store">Whether <code>value</code> should be stored in the index
+               /// </param>
+               /// <param name="index">Whether the field should be indexed, and if so, if it should
+               /// be tokenized before indexing 
+               /// </param>
+               /// <param name="termVector">Whether term vector should be stored
+               /// </param>
+               /// <throws>  NullPointerException if name or value is <code>null</code> </throws>
+               /// <throws>  IllegalArgumentException in any of the following situations: </throws>
+               /// <summary> <ul> 
+               /// <li>the field is neither stored nor indexed</li> 
+               /// <li>the field is not indexed but termVector is <code>TermVector.YES</code></li>
+               /// </ul> 
+               /// </summary>
+               public Field(System.String name, System.String value_Renamed, Store store, Index index, TermVector termVector):this(name, true, value_Renamed, store, index, termVector)
+               {
+               }
+               
+               /// <summary> Create a field by specifying its name, value and how it will
+               /// be saved in the index.
+               /// 
+               /// </summary>
+               /// <param name="name">The name of the field
+               /// </param>
+               /// <param name="internName">Whether to .intern() name or not
+               /// </param>
+               /// <param name="value">The string to process
+               /// </param>
+               /// <param name="store">Whether <code>value</code> should be stored in the index
+               /// </param>
+               /// <param name="index">Whether the field should be indexed, and if so, if it should
+               /// be tokenized before indexing 
+               /// </param>
+               /// <param name="termVector">Whether term vector should be stored
+               /// </param>
+               /// <throws>  NullPointerException if name or value is <code>null</code> </throws>
+               /// <throws>  IllegalArgumentException in any of the following situations: </throws>
+               /// <summary> <ul> 
+               /// <li>the field is neither stored nor indexed</li> 
+               /// <li>the field is not indexed but termVector is <code>TermVector.YES</code></li>
+               /// </ul> 
+               /// </summary>
+               public Field(System.String name, bool internName, System.String value_Renamed, Store store, Index index, TermVector termVector)
+               {
+                       if (name == null)
+                               throw new System.NullReferenceException("name cannot be null");
+                       if (value_Renamed == null)
+                               throw new System.NullReferenceException("value cannot be null");
+                       if (name.Length == 0 && value_Renamed.Length == 0)
+                               throw new System.ArgumentException("name and value cannot both be empty");
+                       if (index == Index.NO && store == Store.NO)
+                               throw new System.ArgumentException("it doesn't make sense to have a field that " + "is neither indexed nor stored");
+                       if (index == Index.NO && termVector != TermVector.NO)
+                               throw new System.ArgumentException("cannot store term vector information " + "for a field that is not indexed");
+                       
+                       if (internName)
+                       // field names are optionally interned
+                               name = StringHelper.Intern(name);
+                       
+                       this.name = name;
+                       
+                       this.fieldsData = value_Renamed;
+                       
+                       if (store == Store.YES)
+                       {
+                               this.isStored = true;
+                               this.isCompressed = false;
+                       }
+                       else if (store == Store.COMPRESS)
+                       {
+                               this.isStored = true;
+                               this.isCompressed = true;
+                       }
+                       else if (store == Store.NO)
+                       {
+                               this.isStored = false;
+                               this.isCompressed = false;
+                       }
+                       else
+                       {
+                               throw new System.ArgumentException("unknown store parameter " + store);
+                       }
+                       
+                       if (index == Index.NO)
+                       {
+                               this.isIndexed = false;
+                               this.isTokenized = false;
+                               this.omitTermFreqAndPositions = false;
+                               this.omitNorms = true;
+                       }
+                       else if (index == Index.ANALYZED)
+                       {
+                               this.isIndexed = true;
+                               this.isTokenized = true;
+                       }
+                       else if (index == Index.NOT_ANALYZED)
+                       {
+                               this.isIndexed = true;
+                               this.isTokenized = false;
+                       }
+                       else if (index == Index.NOT_ANALYZED_NO_NORMS)
+                       {
+                               this.isIndexed = true;
+                               this.isTokenized = false;
+                               this.omitNorms = true;
+                       }
+                       else if (index == Index.ANALYZED_NO_NORMS)
+                       {
+                               this.isIndexed = true;
+                               this.isTokenized = true;
+                               this.omitNorms = true;
+                       }
+                       else
+                       {
+                               throw new System.ArgumentException("unknown index parameter " + index);
+                       }
+                       
+                       this.isBinary = false;
+                       
+                       SetStoreTermVector(termVector);
+               }
+               
+               /// <summary> Create a tokenized and indexed field that is not stored. Term vectors will
+               /// not be stored.  The Reader is read only when the Document is added to the index,
+               /// i.e. you may not close the Reader until {@link IndexWriter#AddDocument(Document)}
+               /// has been called.
+               /// 
+               /// </summary>
+               /// <param name="name">The name of the field
+               /// </param>
+               /// <param name="reader">The reader with the content
+               /// </param>
+               /// <throws>  NullPointerException if name or reader is <code>null</code> </throws>
+               public Field(System.String name, System.IO.TextReader reader):this(name, reader, TermVector.NO)
+               {
+               }
+               
+               /// <summary> Create a tokenized and indexed field that is not stored, optionally with 
+               /// storing term vectors.  The Reader is read only when the Document is added to the index,
+               /// i.e. you may not close the Reader until {@link IndexWriter#AddDocument(Document)}
+               /// has been called.
+               /// 
+               /// </summary>
+               /// <param name="name">The name of the field
+               /// </param>
+               /// <param name="reader">The reader with the content
+               /// </param>
+               /// <param name="termVector">Whether term vector should be stored
+               /// </param>
+               /// <throws>  NullPointerException if name or reader is <code>null</code> </throws>
+               public Field(System.String name, System.IO.TextReader reader, TermVector termVector)
+               {
+                       if (name == null)
+                               throw new System.NullReferenceException("name cannot be null");
+                       if (reader == null)
+                               throw new System.NullReferenceException("reader cannot be null");
+                       
+                       this.name = StringHelper.Intern(name); // field names are interned
+                       this.fieldsData = reader;
+                       
+                       this.isStored = false;
+                       this.isCompressed = false;
+                       
+                       this.isIndexed = true;
+                       this.isTokenized = true;
+                       
+                       this.isBinary = false;
+                       
+                       SetStoreTermVector(termVector);
+               }
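+
+               // Illustrative sketch (editor's addition, not part of the original
+               // source); the file path is hypothetical. The reader may only be closed
+               // once AddDocument has been called:
+               //
+               //     System.IO.TextReader reader = new System.IO.StreamReader("article.txt");
+               //     doc.Add(new Field("contents", reader));
+               //     writer.AddDocument(doc); // the Reader is consumed here
+               //     reader.Close();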
+               
+               /// <summary> Create a tokenized and indexed field that is not stored. Term vectors will
+               /// not be stored. This is useful for pre-analyzed fields.
+               /// The TokenStream is read only when the Document is added to the index,
+               /// i.e. you may not close the TokenStream until {@link IndexWriter#AddDocument(Document)}
+               /// has been called.
+               /// 
+               /// </summary>
+               /// <param name="name">The name of the field
+               /// </param>
+               /// <param name="tokenStream">The TokenStream with the content
+               /// </param>
+               /// <throws>  NullPointerException if name or tokenStream is <code>null</code> </throws>
+               public Field(System.String name, TokenStream tokenStream):this(name, tokenStream, TermVector.NO)
+               {
+               }
+               
+               /// <summary> Create a tokenized and indexed field that is not stored, optionally with 
+               /// storing term vectors.  This is useful for pre-analyzed fields.
+               /// The TokenStream is read only when the Document is added to the index,
+               /// i.e. you may not close the TokenStream until {@link IndexWriter#AddDocument(Document)}
+               /// has been called.
+               /// 
+               /// </summary>
+               /// <param name="name">The name of the field
+               /// </param>
+               /// <param name="tokenStream">The TokenStream with the content
+               /// </param>
+               /// <param name="termVector">Whether term vector should be stored
+               /// </param>
+               /// <throws>  NullPointerException if name or tokenStream is <code>null</code> </throws>
+               public Field(System.String name, TokenStream tokenStream, TermVector termVector)
+               {
+                       if (name == null)
+                               throw new System.NullReferenceException("name cannot be null");
+                       if (tokenStream == null)
+                               throw new System.NullReferenceException("tokenStream cannot be null");
+                       
+                       this.name = StringHelper.Intern(name); // field names are interned
+                       this.fieldsData = null;
+                       this.tokenStream = tokenStream;
+                       
+                       this.isStored = false;
+                       this.isCompressed = false;
+                       
+                       this.isIndexed = true;
+                       this.isTokenized = true;
+                       
+                       this.isBinary = false;
+                       
+                       SetStoreTermVector(termVector);
+               }
+               
+               
+               /// <summary> Create a stored field with binary value. Optionally the value may be compressed.
+               /// 
+               /// </summary>
+               /// <param name="name">The name of the field
+               /// </param>
+               /// <param name="value">The binary value
+               /// </param>
+               /// <param name="store">How <code>value</code> should be stored (compressed or not)
+               /// </param>
+               /// <throws>  IllegalArgumentException if store is <code>Store.NO</code>  </throws>
+               public Field(System.String name, byte[] value_Renamed, Store store):this(name, value_Renamed, 0, value_Renamed.Length, store)
+               {
+               }
+               
+               /// <summary> Create a stored field with binary value. Optionally the value may be compressed.
+               /// 
+               /// </summary>
+               /// <param name="name">The name of the field
+               /// </param>
+               /// <param name="value">The binary value
+               /// </param>
+               /// <param name="offset">Starting offset in value where this Field's bytes are
+               /// </param>
+               /// <param name="length">Number of bytes to use for this Field, starting at offset
+               /// </param>
+               /// <param name="store">How <code>value</code> should be stored (compressed or not)
+               /// </param>
+               /// <throws>  IllegalArgumentException if store is <code>Store.NO</code>  </throws>
+               public Field(System.String name, byte[] value_Renamed, int offset, int length, Store store)
+               {
+                       
+                       if (name == null)
+                               throw new System.ArgumentException("name cannot be null");
+                       if (value_Renamed == null)
+                               throw new System.ArgumentException("value cannot be null");
+                       
+                       this.name = StringHelper.Intern(name); // field names are interned
+                       fieldsData = value_Renamed;
+                       
+                       if (store == Store.YES)
+                       {
+                               isStored = true;
+                               isCompressed = false;
+                       }
+                       else if (store == Store.COMPRESS)
+                       {
+                               isStored = true;
+                               isCompressed = true;
+                       }
+                       else if (store == Store.NO)
+                               throw new System.ArgumentException("binary values can't be unstored");
+                       else
+                       {
+                               throw new System.ArgumentException("unknown store parameter " + store);
+                       }
+                       
+                       isIndexed = false;
+                       isTokenized = false;
+                       omitTermFreqAndPositions = false;
+                       omitNorms = true;
+                       
+                       isBinary = true;
+                       binaryLength = length;
+                       binaryOffset = offset;
+                       
+                       SetStoreTermVector(TermVector.NO);
+               }
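+
+               // Illustrative sketch (editor's addition, not part of the original
+               // source); the payload is hypothetical. Binary values must be stored,
+               // so Store.NO is rejected by this constructor:
+               //
+               //     byte[] payload = System.Text.Encoding.UTF8.GetBytes("raw data");
+               //     doc.Add(new Field("payload", payload, Field.Store.YES));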
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/FieldSelector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/FieldSelector.cs
new file mode 100644 (file)
index 0000000..0379544
--- /dev/null
@@ -0,0 +1,38 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Documents
+{
+       /// <summary> Similar to a {@link java.io.FileFilter}, the FieldSelector allows one to make decisions about
+       /// what Fields get loaded on a {@link Document} by {@link Mono.Lucene.Net.Index.IndexReader#Document(int,Mono.Lucene.Net.Documents.FieldSelector)}
+       /// 
+       /// 
+       /// </summary>
+       public interface FieldSelector
+       {
+               
+               /// <summary>Decides whether and how the field named <code>fieldName</code> should be loaded.</summary>
+               /// <param name="fieldName">the field to accept or reject
+               /// </param>
+               /// <returns> an instance of {@link FieldSelectorResult}
+               /// if the {@link Field} named <code>fieldName</code> should be loaded.
+               /// </returns>
+               FieldSelectorResult Accept(System.String fieldName);
+       }
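+
+       // Illustrative sketch (editor's addition, not part of the original source):
+       // a selector that loads only a hypothetical "title" field and skips the rest.
+       //
+       //     [Serializable]
+       //     public class TitleOnlySelector : FieldSelector
+       //     {
+       //             public FieldSelectorResult Accept(System.String fieldName)
+       //             {
+       //                     return fieldName == "title"
+       //                             ? FieldSelectorResult.LOAD
+       //                             : FieldSelectorResult.NO_LOAD;
+       //             }
+       //     }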
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/FieldSelectorResult.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/FieldSelectorResult.cs
new file mode 100644 (file)
index 0000000..04b26cb
--- /dev/null
@@ -0,0 +1,117 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using System.Runtime.InteropServices;
+
+namespace Mono.Lucene.Net.Documents
+{
+       /// <summary>  Provides information about what should be done with this Field 
+       /// 
+       /// 
+       /// </summary>
+       //Replace with an enumerated type in 1.5
+       [Serializable]
+       public sealed class FieldSelectorResult
+       {
+               
+               /// <summary> Load this {@link Field} every time the {@link Document} is loaded, reading in the data as it is encountered.
+               /// {@link Document#GetField(String)} and {@link Document#GetFieldable(String)} should not return null.
+               /// <p/>
+               /// {@link Document#Add(Fieldable)} should be called by the Reader.
+               /// </summary>
+               [NonSerialized]
+               public static readonly FieldSelectorResult LOAD = new FieldSelectorResult(0);
+               /// <summary> Lazily load this {@link Field}.  This means the {@link Field} is valid, but it may not actually contain its data until
+               /// invoked.  {@link Document#GetField(String)} SHOULD NOT BE USED.  {@link Document#GetFieldable(String)} is safe to use and should
+               /// return a valid instance of a {@link Fieldable}.
+               /// <p/>
+               /// {@link Document#Add(Fieldable)} should be called by the Reader.
+               /// </summary>
+               [NonSerialized]
+               public static readonly FieldSelectorResult LAZY_LOAD = new FieldSelectorResult(1);
+               /// <summary> Do not load the {@link Field}.  {@link Document#GetField(String)} and {@link Document#GetFieldable(String)} should return null.
+               /// {@link Document#Add(Fieldable)} is not called.
+               /// <p/>
+               /// {@link Document#Add(Fieldable)} should not be called by the Reader.
+               /// </summary>
+               [NonSerialized]
+               public static readonly FieldSelectorResult NO_LOAD = new FieldSelectorResult(2);
+               /// <summary> Load this field as in the {@link #LOAD} case, but immediately return from {@link Field} loading for the {@link Document}.  Thus, the
+               /// Document may not have its complete set of Fields.  {@link Document#GetField(String)} and {@link Document#GetFieldable(String)} should
+               /// both be valid for this {@link Field}
+               /// <p/>
+               /// {@link Document#Add(Fieldable)} should be called by the Reader.
+               /// </summary>
+               [NonSerialized]
+               public static readonly FieldSelectorResult LOAD_AND_BREAK = new FieldSelectorResult(3);
+               /// <summary> Behaves much like {@link #LOAD} but does not uncompress any compressed data.  This is used for internal purposes.
+               /// {@link Document#GetField(String)} and {@link Document#GetFieldable(String)} should not return null.
+               /// <p/>
+               /// {@link Document#Add(Fieldable)} should be called by
+               /// the Reader.
+               /// </summary>
+               /// <deprecated> This is an internal option only, and is
+               /// no longer needed now that {@link CompressionTools}
+               /// is used for field compression.
+               /// </deprecated>
+        [Obsolete("This is an internal option only, and is no longer needed now that CompressionTools is used for field compression.")]
+               [NonSerialized]
+               public static readonly FieldSelectorResult LOAD_FOR_MERGE = new FieldSelectorResult(4);
+               
+               /// <summary>Expert:  Load the size of this {@link Field} rather than its value.
+               /// Size is measured as the number of bytes required to store the field: the byte length for a binary or compressed value, and 2 * chars for a String value.
+               /// The size is stored as a binary value, represented as an int in a byte[], with the higher-order byte first, in [0].
+               /// </summary>
+               [NonSerialized]
+               public static readonly FieldSelectorResult SIZE = new FieldSelectorResult(5);
+               
+               /// <summary>Expert: Like {@link #SIZE} but immediately break from the field loading loop, i.e., stop loading further fields, after the size is loaded </summary>
+               [NonSerialized]
+               public static readonly FieldSelectorResult SIZE_AND_BREAK = new FieldSelectorResult(6);
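+
+               // Illustrative sketch (editor's addition, not part of the original
+               // source): decoding the big-endian int written when a field is loaded
+               // with SIZE, per the byte layout described above; the field name is
+               // hypothetical:
+               //
+               //     byte[] b = doc.GetBinaryValue("someField");
+               //     int size = (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];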
+               
+               
+               
+               private int id;
+               
+               private FieldSelectorResult(int id)
+               {
+                       this.id = id;
+               }
+               
+               public  override bool Equals(System.Object o)
+               {
+                       if (this == o)
+                               return true;
+                       if (o == null || GetType() != o.GetType())
+                               return false;
+                       
+                       FieldSelectorResult that = (FieldSelectorResult) o;
+                       
+                       if (id != that.id)
+                               return false;
+                       
+                       return true;
+               }
+               
+               public override int GetHashCode()
+               {
+                       return id;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/Fieldable.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/Fieldable.cs
new file mode 100644 (file)
index 0000000..29c9b6c
--- /dev/null
@@ -0,0 +1,221 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TokenStream = Mono.Lucene.Net.Analysis.TokenStream;
+using FieldInvertState = Mono.Lucene.Net.Index.FieldInvertState;
+
+namespace Mono.Lucene.Net.Documents
+{
+       
+       /// <summary> Synonymous with {@link Field}.
+       /// 
+       /// <p/><bold>WARNING</bold>: This interface may change within minor versions, despite Lucene's backward compatibility requirements.
+       /// This means new methods may be added from version to version.  This change only affects the Fieldable API; other backwards
+       /// compatibility promises remain intact. For example, Lucene can still
+       /// read and write indices created within the same major version.
+       /// <p/>
+       /// 
+       /// 
+       /// </summary>
+       public interface Fieldable
+       {
+               /// <summary>Sets the boost factor for hits on this field.  This value will be
+               /// multiplied into the score of all hits on this field of this
+               /// document.
+               /// 
+               /// <p/>The boost is multiplied by {@link Mono.Lucene.Net.Documents.Document#GetBoost()} of the document
+               /// containing this field.  If a document has multiple fields with the same
+               /// name, all such values are multiplied together.  This product is then
+               /// used to compute the norm factor for the field.  By
+               /// default, in the {@link
+               /// Mono.Lucene.Net.Search.Similarity#ComputeNorm(String,
+               /// FieldInvertState)} method, the boost value is multiplied
+               /// by the {@link
+               /// Mono.Lucene.Net.Search.Similarity#LengthNorm(String,
+               /// int)} and then rounded by {@link Mono.Lucene.Net.Search.Similarity#EncodeNorm(float)} before it is stored in the
+               /// index.  One should attempt to ensure that this product does not overflow
+               /// the range of that encoding.
+               /// 
+               /// </summary>
+               /// <seealso cref="Mono.Lucene.Net.Documents.Document.SetBoost(float)">
+               /// </seealso>
+               /// <seealso cref="Mono.Lucene.Net.Search.Similarity.ComputeNorm(String, FieldInvertState)">
+               /// </seealso>
+               /// <seealso cref="Mono.Lucene.Net.Search.Similarity.EncodeNorm(float)">
+               /// </seealso>
+               void  SetBoost(float boost);
+               
+               /// <summary>Returns the boost factor for hits for this field.
+               /// 
+               /// <p/>The default value is 1.0.
+               /// 
+               /// <p/>Note: this value is not stored directly with the document in the index.
+               /// Documents returned from {@link Mono.Lucene.Net.Index.IndexReader#Document(int)} and
+               /// {@link Mono.Lucene.Net.Search.Hits#Doc(int)} may thus not have the same value present as when
+               /// this field was indexed.
+               /// 
+               /// </summary>
+               /// <seealso cref="SetBoost(float)">
+               /// </seealso>
+               float GetBoost();
+               
+               /// <summary>Returns the name of the field as an interned string.
+               /// For example "date", "title", "body", ...
+               /// </summary>
+               System.String Name();
+               
+               /// <summary>The value of the field as a String, or null.
+               /// <p/>
+               /// For indexing, if isStored()==true, the stringValue() will be used as the stored field value
+               /// unless isBinary()==true, in which case binaryValue() will be used.
+               /// 
+               /// If isIndexed()==true and isTokenized()==false, this String value will be indexed as a single token.
+               /// If isIndexed()==true and isTokenized()==true, then tokenStreamValue() will be used to generate indexed tokens if not null,
+               /// else readerValue() will be used to generate indexed tokens if not null, else stringValue() will be used to generate tokens.
+               /// </summary>
+               System.String StringValue();
+               
+               /// <summary>The value of the field as a Reader, which can be used at index time to generate indexed tokens.</summary>
+               /// <seealso cref="StringValue()">
+               /// </seealso>
+               System.IO.TextReader ReaderValue();
+               
+               /// <summary>The value of the field in Binary, or null.</summary>
+               /// <seealso cref="StringValue()">
+               /// </seealso>
+               byte[] BinaryValue();
+               
+               /// <summary>The TokenStream for this field to be used when indexing, or null.</summary>
+               /// <seealso cref="StringValue()">
+               /// </seealso>
+               TokenStream TokenStreamValue();
+               
+               /// <summary>True if the value of the field is to be stored in the index for return
+               /// with search hits. 
+               /// </summary>
+               bool IsStored();
+               
+               /// <summary>True if the value of the field is to be indexed, so that it may be
+               /// searched on. 
+               /// </summary>
+               bool IsIndexed();
+               
+               /// <summary>True if the value of the field should be tokenized as text prior to
+               /// indexing.  Un-tokenized fields are indexed as a single word and may not be
+               /// Reader-valued. 
+               /// </summary>
+               bool IsTokenized();
+               
+               /// <summary>True if the value of the field is stored and compressed within the index </summary>
+               bool IsCompressed();
+               
+               /// <summary>True if the term or terms used to index this field are stored as a term
+               /// vector, available from {@link Mono.Lucene.Net.Index.IndexReader#GetTermFreqVector(int,String)}.
+               /// These methods do not provide access to the original content of the field,
+               /// only to terms used to index it. If the original content must be
+               /// preserved, use the <code>stored</code> attribute instead.
+               /// 
+               /// </summary>
+               /// <seealso cref="Mono.Lucene.Net.Index.IndexReader.GetTermFreqVector(int, String)">
+               /// </seealso>
+               bool IsTermVectorStored();
+               
+               /// <summary> True if terms are stored as term vector together with their offsets 
+               /// (start and end position in source text).
+               /// </summary>
+               bool IsStoreOffsetWithTermVector();
+               
+               /// <summary> True if terms are stored as term vector together with their token positions.</summary>
+               bool IsStorePositionWithTermVector();
+               
+               /// <summary>True if the value of the field is stored as binary </summary>
+               bool IsBinary();
+               
+               /// <summary>True if norms are omitted for this indexed field </summary>
+               bool GetOmitNorms();
+               
+               /// <summary>Expert:
+               /// 
+               /// If set, omit normalization factors associated with this indexed field.
+               /// This effectively disables indexing boosts and length normalization for this field.
+               /// </summary>
+               void  SetOmitNorms(bool omitNorms);
+               
+               /// <deprecated> Renamed to {@link AbstractField#setOmitTermFreqAndPositions} 
+               /// </deprecated>
+        [Obsolete("Renamed to AbstractField.SetOmitTermFreqAndPositions")]
+               void  SetOmitTf(bool omitTf);
+               
+               /// <deprecated> Renamed to {@link AbstractField#getOmitTermFreqAndPositions} 
+               /// </deprecated>
+        [Obsolete("Renamed to AbstractField.GetOmitTermFreqAndPositions")]
+               bool GetOmitTf();
+               
+               /// <summary> Indicates whether a Field is Lazy or not.  The semantics of Lazy loading are such that if a Field is lazily loaded, retrieving
+               /// its values via {@link #StringValue()} or {@link #BinaryValue()} is only valid as long as the {@link Mono.Lucene.Net.Index.IndexReader} that
+               /// retrieved the {@link Document} is still open.
+               /// 
+               /// </summary>
+               /// <returns> true if this field can be loaded lazily
+               /// </returns>
+               bool IsLazy();
+               
+               /// <summary> Returns the offset into the byte[] segment that is used as the value;
+               /// if the Field is not binary, the returned value is undefined.
+               /// </summary>
+               /// <returns> index of the first character in byte[] segment that represents this Field value
+               /// </returns>
+               int GetBinaryOffset();
+               
+               /// <summary> Returns the length of the byte[] segment that is used as the value;
+               /// if the Field is not binary, the returned value is undefined.
+               /// </summary>
+               /// <returns> length of byte[] segment that represents this Field value
+               /// </returns>
+               int GetBinaryLength();
+               
+               /// <summary> Return the raw byte[] for the binary field.  Note that
+               /// you must also call {@link #getBinaryLength} and {@link
+               /// #getBinaryOffset} to know which range of bytes in this
+               /// returned array belong to the field.
+               /// </summary>
+               /// <returns> reference to the Field value as byte[].
+               /// </returns>
+               byte[] GetBinaryValue();
+               
+               /// <summary> Return the raw byte[] for the binary field.  Note that
+               /// you must also call {@link #getBinaryLength} and {@link
+               /// #getBinaryOffset} to know which range of bytes in this
+               /// returned array belong to the field.<p/>
+               /// About reuse: if you pass in the result byte[] and it is
+               /// used, likely the underlying implementation will hold
+               /// onto this byte[] and return it in future calls to
+               /// {@link #BinaryValue()} or {@link #GetBinaryValue()}.
+               /// So if you subsequently re-use the same byte[] elsewhere
+               /// it will alter this Fieldable's value.
+               /// </summary>
+               /// <param name="result"> User defined buffer that will be used if
+               /// possible.  If this is null or not large enough, a new
+               /// buffer is allocated
+               /// </param>
+               /// <returns> reference to the Field value as byte[].
+               /// </returns>
+               byte[] GetBinaryValue(byte[] result);
+       }
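+
+       // Illustrative sketch (editor's addition, not part of the original source)
+       // of the buffer-reuse contract on GetBinaryValue(byte[]): pass a scratch
+       // buffer and honour the offset/length pair when reading the result.
+       //
+       //     byte[] scratch = new byte[1024];
+       //     byte[] data = field.GetBinaryValue(scratch); // may or may not be scratch
+       //     int off = field.GetBinaryOffset(), len = field.GetBinaryLength();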
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/LoadFirstFieldSelector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/LoadFirstFieldSelector.cs
new file mode 100644 (file)
index 0000000..0e6e892
--- /dev/null
@@ -0,0 +1,35 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+namespace Mono.Lucene.Net.Documents
+{
+       
+       /// <summary> Load the First field and break.
+       /// <p/>
+       /// See {@link FieldSelectorResult#LOAD_AND_BREAK}
+       /// </summary>
+       [Serializable]
+       public class LoadFirstFieldSelector : FieldSelector
+       {
+               
+               public virtual FieldSelectorResult Accept(System.String fieldName)
+               {
+                       return FieldSelectorResult.LOAD_AND_BREAK;
+               }
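+
+               // Illustrative sketch (editor's addition, not part of the original
+               // source): fetching only the first stored field of a document through
+               // the IndexReader.Document(int, FieldSelector) overload noted above:
+               //
+               //     Document doc = reader.Document(docId, new LoadFirstFieldSelector());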
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/MapFieldSelector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/MapFieldSelector.cs
new file mode 100644 (file)
index 0000000..323df97
--- /dev/null
@@ -0,0 +1,71 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Documents
+{
+       
+       /// <summary> A {@link FieldSelector} based on a Map of field names to {@link FieldSelectorResult}s
+       /// 
+       /// </summary>
+       [Serializable]
+       public class MapFieldSelector : FieldSelector
+       {
+               
+               internal System.Collections.IDictionary fieldSelections;
+               
+               /// <summary>Create a MapFieldSelector</summary>
+               /// <param name="fieldSelections">maps from field names (String) to {@link FieldSelectorResult}s
+               /// </param>
+               public MapFieldSelector(System.Collections.IDictionary fieldSelections)
+               {
+                       this.fieldSelections = fieldSelections;
+               }
+               
+               /// <summary>Create a MapFieldSelector</summary>
+               /// <param name="fields">fields to LOAD.  List of Strings.  All other fields are NO_LOAD.
+               /// </param>
+               public MapFieldSelector(System.Collections.IList fields)
+               {
+                       fieldSelections = new System.Collections.Hashtable(fields.Count * 5 / 3);
+                       for (int i = 0; i < fields.Count; i++)
+                               fieldSelections[fields[i]] = FieldSelectorResult.LOAD;
+               }
+               
+               /// <summary>Create a MapFieldSelector</summary>
+               /// <param name="fields">fields to LOAD.  All other fields are NO_LOAD.
+               /// </param>
+               public MapFieldSelector(System.String[] fields)
+               {
+                       fieldSelections = new System.Collections.Hashtable(fields.Length * 5 / 3);
+                       for (int i = 0; i < fields.Length; i++)
+                               fieldSelections[fields[i]] = FieldSelectorResult.LOAD;
+               }
+               
+               /// <summary>Load field according to its associated value in fieldSelections</summary>
+               /// <param name="field">a field name
+               /// </param>
+               /// <returns> the fieldSelections value that field maps to or NO_LOAD if none.
+               /// </returns>
+               public virtual FieldSelectorResult Accept(System.String field)
+               {
+                       FieldSelectorResult selection = (FieldSelectorResult) fieldSelections[field];
+                       return selection != null ? selection : FieldSelectorResult.NO_LOAD;
+               }
+       }
+}
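A short sketch of the selector in use, under the same assumption about the IndexReader.Document(int, FieldSelector) overload; the field names are illustrative:

    // Hypothetical helper demonstrating the String[] constructor.
    static Document LoadTitleAndDate(Mono.Lucene.Net.Index.IndexReader reader, int docId)
    {
        // "title" and "date" map to LOAD; Accept() answers NO_LOAD for
        // every other field name.
        FieldSelector selector = new MapFieldSelector(new System.String[] { "title", "date" });
        return reader.Document(docId, selector);
    }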
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/NumberTools.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/NumberTools.cs
new file mode 100644 (file)
index 0000000..0ed35cf
--- /dev/null
@@ -0,0 +1,222 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NumericUtils = Mono.Lucene.Net.Util.NumericUtils;
+using NumericRangeQuery = Mono.Lucene.Net.Search.NumericRangeQuery;
+
+namespace Mono.Lucene.Net.Documents
+{
+       
+       // do not remove this class in 3.0, it may be needed to decode old indexes!
+       
+       /// <summary> Provides support for converting longs to Strings, and back again. The strings
+       /// are structured so that lexicographic sorting order is preserved.
+       /// 
+       /// <p/>
+       /// That is, if l1 is less than l2 for any two longs l1 and l2, then
+       /// NumberTools.longToString(l1) is lexicographically less than
+       /// NumberTools.longToString(l2). (Similarly for "greater than" and "equals".)
+       /// 
+       /// <p/>
+       /// This class handles <b>all</b> long values (unlike
+       /// {@link Mono.Lucene.Net.Documents.DateField}).
+       /// 
+       /// </summary>
+       /// <deprecated> For new indexes use {@link NumericUtils} instead, which
+       /// provides a sortable binary representation (prefix encoded) of numeric
+       /// values.
+       /// To index and efficiently query numeric values use {@link NumericField}
+       /// and {@link NumericRangeQuery}.
+       /// This class is included for use with existing
+       /// indices and will be removed in a future release.
+       /// </deprecated>
+    [Obsolete("For new indexes use NumericUtils instead, which provides a sortable binary representation (prefix encoded) of numeric values. To index and efficiently query numeric values use NumericField and NumericRangeQuery. This class is included for use with existing indices and will be removed in a future release.")]
+       public class NumberTools
+       {
+               
+               private const int RADIX = 36;
+               
+               private const char NEGATIVE_PREFIX = '-';
+               
+               // NB: NEGATIVE_PREFIX must be < POSITIVE_PREFIX
+               private const char POSITIVE_PREFIX = '0';
+               
+               /// <summary> Equivalent to longToString(Long.MIN_VALUE)</summary>
+#if !PRE_LUCENE_NET_2_0_0_COMPATIBLE
+               public static readonly System.String MIN_STRING_VALUE = NEGATIVE_PREFIX + "0000000000000";
+#else
+        public static readonly System.String MIN_STRING_VALUE = NEGATIVE_PREFIX + "0000000000000000";
+#endif
+               
+               /// <summary> Equivalent to longToString(Long.MAX_VALUE)</summary>
+#if !PRE_LUCENE_NET_2_0_0_COMPATIBLE
+               public static readonly System.String MAX_STRING_VALUE = POSITIVE_PREFIX + "1y2p0ij32e8e7";
+#else
+        public static readonly System.String MAX_STRING_VALUE = POSITIVE_PREFIX + "7fffffffffffffff";
+#endif
+               
+               /// <summary> The length of (all) strings returned by {@link #longToString}</summary>
+               public static readonly int STR_SIZE = MIN_STRING_VALUE.Length;
+               
+               /// <summary> Converts a long to a String suitable for indexing.</summary>
+               public static System.String LongToString(long l)
+               {
+                       
+                       if (l == System.Int64.MinValue)
+                       {
+                               // special case, because long is not symmetric around zero
+                               return MIN_STRING_VALUE;
+                       }
+                       
+                       System.Text.StringBuilder buf = new System.Text.StringBuilder(STR_SIZE);
+                       
+                       if (l < 0)
+                       {
+                               buf.Append(NEGATIVE_PREFIX);
+                               l = System.Int64.MaxValue + l + 1;
+                       }
+                       else
+                       {
+                               buf.Append(POSITIVE_PREFIX);
+                       }
+#if !PRE_LUCENE_NET_2_0_0_COMPATIBLE
+            System.String num = ToString(l);
+#else
+            System.String num = System.Convert.ToString(l, RADIX);
+#endif
+                       
+                       int padLen = STR_SIZE - num.Length - buf.Length;
+                       while (padLen-- > 0)
+                       {
+                               buf.Append('0');
+                       }
+                       buf.Append(num);
+                       
+                       return buf.ToString();
+               }
+               
+               /// <summary> Converts a String that was returned by {@link #longToString} back to a
+               /// long.
+               /// </summary>
+               /// <exception cref="System.NullReferenceException">if the input is null</exception>
+               /// <exception cref="System.FormatException">if the input does not parse (it was not a
+               /// String returned by longToString()).</exception>
+               public static long StringToLong(System.String str)
+               {
+                       if (str == null)
+                       {
+                               throw new System.NullReferenceException("string cannot be null");
+                       }
+                       if (str.Length != STR_SIZE)
+                       {
+                               throw new System.FormatException("string is the wrong size");
+                       }
+                       
+                       if (str.Equals(MIN_STRING_VALUE))
+                       {
+                               return System.Int64.MinValue;
+                       }
+                       
+                       char prefix = str[0];
+#if !PRE_LUCENE_NET_2_0_0_COMPATIBLE
+                       long l = ToLong(str.Substring(1));
+#else
+            long l = System.Convert.ToInt64(str.Substring(1), RADIX);
+#endif
+                       
+                       if (prefix == POSITIVE_PREFIX)
+                       {
+                               // nop
+                       }
+                       else if (prefix == NEGATIVE_PREFIX)
+                       {
+                               l = l - System.Int64.MaxValue - 1;
+                       }
+                       else
+                       {
+                               throw new System.FormatException("string does not begin with the correct prefix");
+                       }
+                       
+                       return l;
+               }
+
+#if !PRE_LUCENE_NET_2_0_0_COMPATIBLE
+        #region BASE36 OPS 
+        static System.String digits = "0123456789abcdefghijklmnopqrstuvwxyz";
+        static long[] powersOf36 = 
+            {
+                1L,
+                36L,
+                36L*36L,
+                36L*36L*36L,
+                36L*36L*36L*36L,
+                36L*36L*36L*36L*36L,
+                36L*36L*36L*36L*36L*36L,
+                36L*36L*36L*36L*36L*36L*36L,
+                36L*36L*36L*36L*36L*36L*36L*36L,
+                36L*36L*36L*36L*36L*36L*36L*36L*36L,
+                36L*36L*36L*36L*36L*36L*36L*36L*36L*36L,
+                36L*36L*36L*36L*36L*36L*36L*36L*36L*36L*36L,
+                36L*36L*36L*36L*36L*36L*36L*36L*36L*36L*36L*36L
+            };
+
+        public static System.String ToString(long lval)
+        {
+            if (lval == 0)
+            {
+                return "0";
+            }
+
+            int maxStrLen = powersOf36.Length;
+            long curval = lval;
+
+            char[] tb = new char[maxStrLen];
+            int outpos = 0;
+            for (int i = 0; i < maxStrLen; i++)
+            {
+                long pval = powersOf36[maxStrLen - i - 1];
+                int pos = (int)(curval / pval);
+                tb[outpos++] = digits[pos]; // index directly rather than allocating a substring
+                curval = curval % pval;
+            }
+            if (outpos == 0)
+                tb[outpos++] = '0';
+            return new System.String(tb, 0, outpos).TrimStart('0');
+        }
+
+        public static long ToLong(System.String t)
+        {
+            long ival = 0;
+            char[] tb = t.ToCharArray();
+            for (int i = 0; i < tb.Length; i++)
+            {
+                ival += powersOf36[i] * digits.IndexOf(tb[tb.Length - i - 1]);
+            }
+            return ival;
+        }
+        #endregion
+#endif
+       }
+}
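A small worked example of the order-preserving property, as a sketch (the exact encoded strings depend on STR_SIZE and are left unstated):

    static void OrderingDemo()
    {
        // -5 encodes with NEGATIVE_PREFIX '-', 7 with POSITIVE_PREFIX '0';
        // since '-' < '0' ordinally, the encodings sort like the numbers.
        System.String a = NumberTools.LongToString(-5);
        System.String b = NumberTools.LongToString(7);
        System.Diagnostics.Debug.Assert(System.String.CompareOrdinal(a, b) < 0);
        // The round trip restores the original value exactly.
        System.Diagnostics.Debug.Assert(NumberTools.StringToLong(a) == -5);
    }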
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/NumericField.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/NumericField.cs
new file mode 100644 (file)
index 0000000..18d9953
--- /dev/null
@@ -0,0 +1,302 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NumericTokenStream = Mono.Lucene.Net.Analysis.NumericTokenStream;
+using TokenStream = Mono.Lucene.Net.Analysis.TokenStream;
+using NumericUtils = Mono.Lucene.Net.Util.NumericUtils;
+using FieldCache = Mono.Lucene.Net.Search.FieldCache;
+using NumericRangeFilter = Mono.Lucene.Net.Search.NumericRangeFilter;
+using NumericRangeQuery = Mono.Lucene.Net.Search.NumericRangeQuery;
+using SortField = Mono.Lucene.Net.Search.SortField;
+
+namespace Mono.Lucene.Net.Documents
+{
+       // javadocs
+       
+       /// <summary> <p/>This class provides a {@link Field} that enables indexing
+       /// of numeric values for efficient range filtering and
+       /// sorting.  Here's an example usage, adding an int value:
+       /// <pre>
+       /// document.add(new NumericField(name).setIntValue(value));
+       /// </pre>
+       /// 
+       /// For optimal performance, re-use the
+       /// <code>NumericField</code> and {@link Document} instance for more than
+       /// one document:
+       /// 
+       /// <pre>
+       /// NumericField field = new NumericField(name);
+       /// Document document = new Document();
+       /// document.add(field);
+       /// 
+       /// for(all documents) {
+       /// ...
+       /// field.setIntValue(value)
+       /// writer.addDocument(document);
+       /// ...
+       /// }
+       /// </pre>
+       /// 
+       /// <p/>The Java native types <code>int</code>, <code>long</code>,
+       /// <code>float</code> and <code>double</code> are
+       /// directly supported.  However, any value that can be
+       /// converted into these native types can also be indexed.
+       /// For example, date/time values represented by a
+       /// {@link java.util.Date} can be translated into a long
+       /// value using the {@link java.util.Date#getTime} method.  If you
+       /// don't need millisecond precision, you can quantize the
+       /// value, either by dividing the result of
+       /// {@link java.util.Date#getTime} or using the separate getters
+       /// (for year, month, etc.) to construct an <code>int</code> or
+       /// <code>long</code> value.<p/>
+       /// 
+       /// <p/>To perform range querying or filtering against a
+       /// <code>NumericField</code>, use {@link NumericRangeQuery} or {@link
+       /// NumericRangeFilter}.  To sort according to a
+       /// <code>NumericField</code>, use the normal numeric sort types, eg
+       /// {@link SortField#INT} (note that {@link SortField#AUTO}
+       /// will not work with these fields).  <code>NumericField</code> values
+       /// can also be loaded directly from {@link FieldCache}.<p/>
+       /// 
+       /// <p/>By default, a <code>NumericField</code>'s value is not stored but
+       /// is indexed for range filtering and sorting.  You can use
+       /// the {@link #NumericField(String,Field.Store,boolean)}
+       /// constructor if you need to change these defaults.<p/>
+       /// 
+       /// <p/>You may add the same field name as a <code>NumericField</code> to
+       /// the same document more than once.  Range querying and
+       /// filtering will be the logical OR of all values; so a range query
+       /// will hit all documents that have at least one value in
+       /// the range. However, sort behavior is not defined.  If you need to sort,
+       /// you should separately index a single-valued <code>NumericField</code>.<p/>
+       /// 
+       /// <p/>A <code>NumericField</code> will consume somewhat more disk space
+       /// in the index than an ordinary single-valued field.
+       /// However, for a typical index that includes substantial
+       /// textual content per document, this increase will likely
+       /// be in the noise. <p/>
+       /// 
+       /// <p/>Within Lucene, each numeric value is indexed as a
+       /// <em>trie</em> structure, where each term is logically
+       /// assigned to larger and larger pre-defined brackets (which
+       /// are simply lower-precision representations of the value).
+       /// The step size between each successive bracket is called the
+       /// <code>precisionStep</code>, measured in bits.  Smaller
+       /// <code>precisionStep</code> values result in a larger number
+       /// of brackets, which consume more disk space in the index
+       /// but may result in faster range search performance.  The
+       /// default value, 4, was selected for a reasonable tradeoff
+       /// of disk space consumption versus performance.  You can
+       /// use the expert constructor {@link
+       /// #NumericField(String,int,Field.Store,boolean)} if you'd
+       /// like to change the value.  Note that you must also
+       /// specify a congruent value when creating {@link
+       /// NumericRangeQuery} or {@link NumericRangeFilter}.
+       /// For low cardinality fields larger precision steps are good.
+       /// If the cardinality is &lt; 100, it is fair
+       /// to use {@link Integer#MAX_VALUE}, which produces one
+       /// term per value.
+       /// 
+       /// <p/>For more information on the internals of numeric trie
+       /// indexing, including the <a
+       /// href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
+       /// configuration, see {@link NumericRangeQuery}. The format of
+       /// indexed values is described in {@link NumericUtils}.
+       /// 
+       /// <p/>If you only need to sort by numeric value, and never
+       /// run range querying/filtering, you can index using a
+       /// <code>precisionStep</code> of {@link Integer#MAX_VALUE}.
+       /// This will minimize disk space consumed. <p/>
+       /// 
+       /// <p/>More advanced users can instead use {@link
+       /// NumericTokenStream} directly, when indexing numbers. This
+       /// class is a wrapper around this token stream type for
+       /// easier, more intuitive usage.<p/>
+       /// 
+       /// <p/><b>NOTE:</b> This class is only used during
+       /// indexing. When retrieving the stored field value from a
+       /// {@link Document} instance after search, you will get a
+       /// conventional {@link Fieldable} instance where the numeric
+       /// values are returned as {@link String}s (according to
+       /// <code>toString(value)</code> of the used data type).
+       /// 
+       /// <p/><font color="red"><b>NOTE:</b> This API is
+       /// experimental and might change in incompatible ways in the
+       /// next release.</font>
+       /// 
+       /// </summary>
+       /// <since> 2.9
+       /// </since>
+       [Serializable]
+       public sealed class NumericField:AbstractField
+       {
+               
+               new private NumericTokenStream tokenStream;
+               
+               /// <summary> Creates a field for numeric values using the default <code>precisionStep</code>
+               /// {@link NumericUtils#PRECISION_STEP_DEFAULT} (4). The instance is not yet initialized with
+               /// a numeric value; before indexing a document containing this field,
+               /// set a value using the various set<em>???</em>Value() methods.
+               /// This constructor creates an indexed, but not stored field.
+               /// </summary>
+               /// <param name="name">the field name
+               /// </param>
+               public NumericField(System.String name):this(name, NumericUtils.PRECISION_STEP_DEFAULT, Field.Store.NO, true)
+               {
+               }
+               
+               /// <summary> Creates a field for numeric values using the default <code>precisionStep</code>
+               /// {@link NumericUtils#PRECISION_STEP_DEFAULT} (4). The instance is not yet initialized with
+               /// a numeric value; before indexing a document containing this field,
+               /// set a value using the various set<em>???</em>Value() methods.
+               /// </summary>
+               /// <param name="name">the field name
+               /// </param>
+               /// <param name="store">if the field should be stored in plain text form
+               /// (according to <code>toString(value)</code> of the used data type)
+               /// </param>
+               /// <param name="index">if the field should be indexed using {@link NumericTokenStream}
+               /// </param>
+               public NumericField(System.String name, Field.Store store, bool index):this(name, NumericUtils.PRECISION_STEP_DEFAULT, store, index)
+               {
+               }
+               
+               /// <summary> Creates a field for numeric values with the specified
+               /// <code>precisionStep</code>. The instance is not yet initialized with
+               /// a numeric value; before indexing a document containing this field,
+               /// set a value using the various set<em>???</em>Value() methods.
+               /// This constructor creates an indexed, but not stored field.
+               /// </summary>
+               /// <param name="name">the field name
+               /// </param>
+               /// <param name="precisionStep">the used <a href="../search/NumericRangeQuery.html#precisionStepDesc">precision step</a>
+               /// </param>
+               public NumericField(System.String name, int precisionStep):this(name, precisionStep, Field.Store.NO, true)
+               {
+               }
+               
+               /// <summary> Creates a field for numeric values with the specified
+               /// <code>precisionStep</code>. The instance is not yet initialized with
+               /// a numeric value; before indexing a document containing this field,
+               /// set a value using the various set<em>???</em>Value() methods.
+               /// </summary>
+               /// <param name="name">the field name
+               /// </param>
+               /// <param name="precisionStep">the used <a href="../search/NumericRangeQuery.html#precisionStepDesc">precision step</a>
+               /// </param>
+               /// <param name="store">if the field should be stored in plain text form
+               /// (according to <code>toString(value)</code> of the used data type)
+               /// </param>
+               /// <param name="index">if the field should be indexed using {@link NumericTokenStream}
+               /// </param>
+               public NumericField(System.String name, int precisionStep, Field.Store store, bool index):base(name, store, index ? Field.Index.ANALYZED_NO_NORMS : Field.Index.NO, Field.TermVector.NO)
+               {
+                       SetOmitTermFreqAndPositions(true);
+                       tokenStream = new NumericTokenStream(precisionStep);
+               }
+               
+               /// <summary>Returns a {@link NumericTokenStream} for indexing the numeric value. </summary>
+               public override TokenStream TokenStreamValue()
+               {
+                       return IsIndexed() ? tokenStream : null;
+               }
+               
+               /// <summary>Always returns <code>null</code> for numeric fields </summary>
+               public override byte[] BinaryValue()
+               {
+                       return null;
+               }
+               
+               /// <summary>Always returns <code>null</code> for numeric fields </summary>
+               public override byte[] GetBinaryValue(byte[] result)
+               {
+                       return null;
+               }
+               
+               /// <summary>Always returns <code>null</code> for numeric fields </summary>
+               public override System.IO.TextReader ReaderValue()
+               {
+                       return null;
+               }
+               
+               /// <summary>Returns the numeric value as a string (how it is stored, when {@link Field.Store#YES} is chosen). </summary>
+               public override System.String StringValue()
+               {
+                       return (fieldsData == null) ? null : fieldsData.ToString();
+               }
+               
+               /// <summary>Returns the current numeric value as a boxed value type, or <code>null</code> if not yet initialized. </summary>
+               public System.ValueType GetNumericValue()
+               {
+                       return (System.ValueType) fieldsData;
+               }
+               
+               /// <summary> Initializes the field with the supplied <code>long</code> value.</summary>
+               /// <param name="value">the numeric value
+               /// </param>
+               /// <returns> this instance, because of this you can use it the following way:
+               /// <code>document.add(new NumericField(name, precisionStep).SetLongValue(value))</code>
+               /// </returns>
+               public NumericField SetLongValue(long value_Renamed)
+               {
+                       tokenStream.SetLongValue(value_Renamed);
+                       fieldsData = (long) value_Renamed;
+                       return this;
+               }
+               
+               /// <summary> Initializes the field with the supplied <code>int</code> value.</summary>
+               /// <param name="value">the numeric value
+               /// </param>
+               /// <returns> this instance, because of this you can use it the following way:
+               /// <code>document.add(new NumericField(name, precisionStep).setIntValue(value))</code>
+               /// </returns>
+               public NumericField SetIntValue(int value_Renamed)
+               {
+                       tokenStream.SetIntValue(value_Renamed);
+                       fieldsData = (System.Int32) value_Renamed;
+                       return this;
+               }
+               
+               /// <summary> Initializes the field with the supplied <code>double</code> value.</summary>
+               /// <param name="value">the numeric value
+               /// </param>
+               /// <returns> this instance, because of this you can use it the following way:
+               /// <code>document.add(new NumericField(name, precisionStep).setDoubleValue(value))</code>
+               /// </returns>
+               public NumericField SetDoubleValue(double value_Renamed)
+               {
+                       tokenStream.SetDoubleValue(value_Renamed);
+                       fieldsData = (double) value_Renamed;
+                       return this;
+               }
+               
+               /// <summary> Initializes the field with the supplied <code>float</code> value.</summary>
+               /// <param name="value">the numeric value
+               /// </param>
+               /// <returns> this instance, because of this you can use it the following way:
+               /// <code>document.add(new NumericField(name, precisionStep).setFloatValue(value))</code>
+               /// </returns>
+               public NumericField SetFloatValue(float value_Renamed)
+               {
+                       tokenStream.SetFloatValue(value_Renamed);
+                       fieldsData = (float) value_Renamed;
+                       return this;
+               }
+       }
+}
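The reuse idiom from the summary above, restated with this port's capitalized method names; "writer" and "prices" are hypothetical stand-ins:

    static void IndexPrices(Mono.Lucene.Net.Index.IndexWriter writer, int[] prices)
    {
        // Re-use one NumericField and one Document across all documents,
        // as the class documentation recommends for performance.
        NumericField field = new NumericField("price", 4, Field.Store.NO, true);
        Document document = new Document();
        document.Add(field);
        foreach (int price in prices)
        {
            field.SetIntValue(price);     // returns the same instance
            writer.AddDocument(document);
        }
    }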
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/Package.html b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/Package.html
new file mode 100644 (file)
index 0000000..279cfff
--- /dev/null
@@ -0,0 +1,56 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">\r
+<!--\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements.  See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License.  You may obtain a copy of the License at\r
+\r
+     http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+-->\r
+<html>\r
+<head>\r
+   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">\r
+</head>\r
+<body>\r
+<p>The logical representation of a {@link Lucene.Net.Documents.Document} for indexing and searching.</p>\r
+<p>The document package provides the user level logical representation of content to be indexed and searched.  The\r
+package also provides utilities for working with {@link Lucene.Net.Documents.Document}s and {@link Lucene.Net.Documents.Fieldable}s.</p>\r
+<h2>Document and Fieldable</h2>\r
+<p>A {@link Lucene.Net.Documents.Document} is a collection of {@link Lucene.Net.Documents.Fieldable}s.  A\r
+  {@link Lucene.Net.Documents.Fieldable} is a logical representation of a user's content that needs to be indexed or stored.\r
+  {@link Lucene.Net.Documents.Fieldable}s have a number of properties that tell Lucene how to treat the content (like indexed, tokenized,\r
+  stored, etc.)  See the {@link Lucene.Net.Documents.Field} implementation of {@link Lucene.Net.Documents.Fieldable}\r
+  for specifics on these properties.\r
+</p>\r
+<p>Note: it is common to refer to {@link Lucene.Net.Documents.Document}s having {@link Lucene.Net.Documents.Field}s, even though technically they have\r
+{@link Lucene.Net.Documents.Fieldable}s.</p>\r
+<h2>Working with Documents</h2>\r
+<p>First and foremost, a {@link Lucene.Net.Documents.Document} is something created by the user application.  It is your job\r
+  to create Documents based on the content of the files you are working with in your application (Word, txt, PDF, Excel or any other format.)\r
+  How this is done is completely up to you.  That being said, there are many tools available in other projects that can ease\r
+  the process of taking a file and converting it into a Lucene {@link Lucene.Net.Documents.Document}.  To see an example of this,\r
+  take a look at the Lucene <a href = "gettingstarted.html" target = "top">demo</a> and the associated source code\r
+  for extracting content from HTML.\r
+</p>\r
+<p>The {@link Lucene.Net.Documents.DateTools} is a utility class to make dates and times searchable\r
+(remember, Lucene only searches text). {@link Lucene.Net.Documents.NumericField} is a special helper class\r
+to simplify indexing of numeric values (and also dates) for fast range queries with {@link Lucene.Net.Search.NumericRangeQuery}\r
+(using a special sortable string representation of numeric values).</p>\r
+<p>The {@link Lucene.Net.Documents.FieldSelector} class provides a mechanism to tell Lucene how to load Documents from\r
+storage.  If no FieldSelector is used, all Fieldables on a Document will be loaded.  As an example of the FieldSelector usage, consider\r
+  the common use case of\r
+displaying search results on a web page and then having users click through to see the full document.  In this scenario, it is often\r
+  the case that there are many small fields and one or two large fields (containing the contents of the original file). Before the FieldSelector,\r
+the full Document had to be loaded, including the large fields, in order to display the results.  Now, using the FieldSelector, one\r
+can {@link Lucene.Net.Documents.FieldSelectorResult#LAZY_LOAD} the large fields, thus only loading the large fields\r
+when a user clicks on the actual link to view the original content.</p>\r
+</body>\r
+</html>\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/SetBasedFieldSelector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Document/SetBasedFieldSelector.cs
new file mode 100644 (file)
index 0000000..f67b637
--- /dev/null
@@ -0,0 +1,71 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Documents
+{
+       /// <summary> Declare what fields to load normally and what fields to load lazily
+       /// 
+       /// 
+       /// </summary>
+       [Serializable]
+       public class SetBasedFieldSelector : FieldSelector
+       {
+               
+               private System.Collections.Hashtable fieldsToLoad;
+               private System.Collections.Hashtable lazyFieldsToLoad;
+               
+               
+               
+               /// <summary> Pass in the Set of {@link Field} names to load and the Set of {@link Field} names to load lazily.  If both are empty, the
+               /// Document will not have any {@link Field} on it.
+               /// </summary>
+               /// <param name="fieldsToLoad">A Set of {@link String} field names to load.  May be empty, but not null
+               /// </param>
+               /// <param name="lazyFieldsToLoad">A Set of {@link String} field names to load lazily.  May be empty, but not null  
+               /// </param>
+               public SetBasedFieldSelector(System.Collections.Hashtable fieldsToLoad, System.Collections.Hashtable lazyFieldsToLoad)
+               {
+                       this.fieldsToLoad = fieldsToLoad;
+                       this.lazyFieldsToLoad = lazyFieldsToLoad;
+               }
+               
+               /// <summary> Indicate whether to load the field with the given name or not. If the {@link Field#Name()} is not in either of the 
+               /// initializing Sets, then {@link Mono.Lucene.Net.Documents.FieldSelectorResult#NO_LOAD} is returned.  If a Field name
+               /// is in both <code>fieldsToLoad</code> and <code>lazyFieldsToLoad</code>, lazy has precedence.
+               /// 
+               /// </summary>
+               /// <param name="fieldName">The {@link Field} name to check
+               /// </param>
+               /// <returns> The {@link FieldSelectorResult}
+               /// </returns>
+               public virtual FieldSelectorResult Accept(System.String fieldName)
+               {
+                       FieldSelectorResult result = FieldSelectorResult.NO_LOAD;
+                       if (fieldsToLoad.Contains(fieldName))
+                       {
+                               result = FieldSelectorResult.LOAD;
+                       }
+                       if (lazyFieldsToLoad.Contains(fieldName))
+                       {
+                               result = FieldSelectorResult.LAZY_LOAD;
+                       }
+                       return result;
+               }
+       }
+}
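A sketch making the precedence rule in Accept concrete: a name present in both sets comes back lazy:

    static void PrecedenceDemo()
    {
        System.Collections.Hashtable load = new System.Collections.Hashtable();
        load["title"] = "title";
        load["body"] = "body";          // also listed as lazy below
        System.Collections.Hashtable lazy = new System.Collections.Hashtable();
        lazy["body"] = "body";
        SetBasedFieldSelector selector = new SetBasedFieldSelector(load, lazy);
        // Accept("title") -> LOAD
        // Accept("body")  -> LAZY_LOAD (lazy wins when a name is in both)
        // Accept("other") -> NO_LOAD
    }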
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/.gitattributes b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/.gitattributes
new file mode 100644 (file)
index 0000000..a86b76c
--- /dev/null
@@ -0,0 +1,133 @@
+/AbstractAllTermDocs.cs -crlf
+/AllTermDocs.cs -crlf
+/BufferedDeletes.cs -crlf
+/ByteBlockPool.cs -crlf
+/ByteSliceReader.cs -crlf
+/ByteSliceWriter.cs -crlf
+/CharBlockPool.cs -crlf
+/CheckIndex.cs -crlf
+/CompoundFileReader.cs -crlf
+/CompoundFileWriter.cs -crlf
+/ConcurrentMergeScheduler.cs -crlf
+/CorruptIndexException.cs -crlf
+/DefaultSkipListReader.cs -crlf
+/DefaultSkipListWriter.cs -crlf
+/DirectoryOwningReader.cs -crlf
+/DirectoryReader.cs -crlf
+/DocConsumer.cs -crlf
+/DocConsumerPerThread.cs -crlf
+/DocFieldConsumer.cs -crlf
+/DocFieldConsumerPerField.cs -crlf
+/DocFieldConsumerPerThread.cs -crlf
+/DocFieldConsumers.cs -crlf
+/DocFieldConsumersPerField.cs -crlf
+/DocFieldConsumersPerThread.cs -crlf
+/DocFieldProcessor.cs -crlf
+/DocFieldProcessorPerField.cs -crlf
+/DocFieldProcessorPerThread.cs -crlf
+/DocInverter.cs -crlf
+/DocInverterPerField.cs -crlf
+/DocInverterPerThread.cs -crlf
+/DocumentsWriter.cs -crlf
+/DocumentsWriterThreadState.cs -crlf
+/FieldInfo.cs -crlf
+/FieldInfos.cs -crlf
+/FieldInvertState.cs -crlf
+/FieldReaderException.cs -crlf
+/FieldSortedTermVectorMapper.cs -crlf
+/FieldsReader.cs -crlf
+/FieldsWriter.cs -crlf
+/FilterIndexReader.cs -crlf
+/FormatPostingsDocsConsumer.cs -crlf
+/FormatPostingsDocsWriter.cs -crlf
+/FormatPostingsFieldsConsumer.cs -crlf
+/FormatPostingsFieldsWriter.cs -crlf
+/FormatPostingsPositionsConsumer.cs -crlf
+/FormatPostingsPositionsWriter.cs -crlf
+/FormatPostingsTermsConsumer.cs -crlf
+/FormatPostingsTermsWriter.cs -crlf
+/FreqProxFieldMergeState.cs -crlf
+/FreqProxTermsWriter.cs -crlf
+/FreqProxTermsWriterPerField.cs -crlf
+/FreqProxTermsWriterPerThread.cs -crlf
+/IndexCommit.cs -crlf
+/IndexCommitPoint.cs -crlf
+/IndexDeletionPolicy.cs -crlf
+/IndexFileDeleter.cs -crlf
+/IndexFileNameFilter.cs -crlf
+/IndexFileNames.cs -crlf
+/IndexModifier.cs -crlf
+/IndexReader.cs -crlf
+/IndexWriter.cs -crlf
+/IntBlockPool.cs -crlf
+/InvertedDocConsumer.cs -crlf
+/InvertedDocConsumerPerField.cs -crlf
+/InvertedDocConsumerPerThread.cs -crlf
+/InvertedDocEndConsumer.cs -crlf
+/InvertedDocEndConsumerPerField.cs -crlf
+/InvertedDocEndConsumerPerThread.cs -crlf
+/KeepOnlyLastCommitDeletionPolicy.cs -crlf
+/LogByteSizeMergePolicy.cs -crlf
+/LogDocMergePolicy.cs -crlf
+/LogMergePolicy.cs -crlf
+/MergeDocIDRemapper.cs -crlf
+/MergePolicy.cs -crlf
+/MergeScheduler.cs -crlf
+/MultiLevelSkipListReader.cs -crlf
+/MultiLevelSkipListWriter.cs -crlf
+/MultiReader.cs -crlf
+/MultipleTermPositions.cs -crlf
+/NormsWriter.cs -crlf
+/NormsWriterPerField.cs -crlf
+/NormsWriterPerThread.cs -crlf
+/Package.html -crlf
+/ParallelReader.cs -crlf
+/Payload.cs -crlf
+/PositionBasedTermVectorMapper.cs -crlf
+/RawPostingList.cs -crlf
+/ReadOnlyDirectoryReader.cs -crlf
+/ReadOnlySegmentReader.cs -crlf
+/ReusableStringReader.cs -crlf
+/SegmentInfo.cs -crlf
+/SegmentInfos.cs -crlf
+/SegmentMergeInfo.cs -crlf
+/SegmentMergeQueue.cs -crlf
+/SegmentMerger.cs -crlf
+/SegmentReader.cs -crlf
+/SegmentTermDocs.cs -crlf
+/SegmentTermEnum.cs -crlf
+/SegmentTermPositionVector.cs -crlf
+/SegmentTermPositions.cs -crlf
+/SegmentTermVector.cs -crlf
+/SegmentWriteState.cs -crlf
+/SerialMergeScheduler.cs -crlf
+/SnapshotDeletionPolicy.cs -crlf
+/SortedTermVectorMapper.cs -crlf
+/StaleReaderException.cs -crlf
+/StoredFieldsWriter.cs -crlf
+/StoredFieldsWriterPerThread.cs -crlf
+/Term.cs -crlf
+/TermBuffer.cs -crlf
+/TermDocs.cs -crlf
+/TermEnum.cs -crlf
+/TermFreqVector.cs -crlf
+/TermInfo.cs -crlf
+/TermInfosReader.cs -crlf
+/TermInfosWriter.cs -crlf
+/TermPositionVector.cs -crlf
+/TermPositions.cs -crlf
+/TermVectorEntry.cs -crlf
+/TermVectorEntryFreqSortedComparator.cs -crlf
+/TermVectorMapper.cs -crlf
+/TermVectorOffsetInfo.cs -crlf
+/TermVectorsReader.cs -crlf
+/TermVectorsTermsWriter.cs -crlf
+/TermVectorsTermsWriterPerField.cs -crlf
+/TermVectorsTermsWriterPerThread.cs -crlf
+/TermVectorsWriter.cs -crlf
+/TermsHash.cs -crlf
+/TermsHashConsumer.cs -crlf
+/TermsHashConsumerPerField.cs -crlf
+/TermsHashConsumerPerThread.cs -crlf
+/TermsHashPerField.cs -crlf
+/TermsHashPerThread.cs -crlf
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/AbstractAllTermDocs.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/AbstractAllTermDocs.cs
new file mode 100644 (file)
index 0000000..335ca91
--- /dev/null
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+namespace Mono.Lucene.Net.Index
+{
+    /// <summary>
+    /// Base class for enumerating all but deleted docs.
+    /// 
+    /// <p/>NOTE: this class is meant only to be used internally
+    /// by Lucene; it's only public so it can be shared across
+    /// packages.  This means the API is freely subject to
+    /// change, and, the class could be removed entirely, in any
+    /// Lucene release.  Use directly at your own risk!
+    /// </summary>
+    public abstract class AbstractAllTermDocs : TermDocs
+    {
+        protected int maxDoc;
+        protected int doc = -1;
+
+        protected AbstractAllTermDocs(int maxDoc)
+        {
+            this.maxDoc = maxDoc;
+        }
+
+        public void Seek(Term term)
+        {
+            if (term == null)
+            {
+                doc = -1;
+            }
+            else
+            {
+                throw new NotSupportedException();
+            }
+        }
+
+        public void Seek(TermEnum termEnum)
+        {
+            throw new NotSupportedException();
+        }
+
+        public int Doc()
+        {
+            return doc;
+        }
+
+        public int Freq()
+        {
+            return 1;
+        }
+
+        public bool Next()
+        {
+            return SkipTo(doc + 1);
+        }
+
+        public int Read(int[] docs, int[] freqs)
+        {
+            int length = docs.Length;
+            int i = 0;
+            while (i < length && doc < maxDoc)
+            {
+                if (!IsDeleted(doc))
+                {
+                    docs[i] = doc;
+                    freqs[i] = 1;
+                    ++i;
+                }
+                doc++;
+            }
+            return i;
+        }
+
+        public bool SkipTo(int target)
+        {
+            doc = target;
+            while (doc < maxDoc)
+            {
+                if (!IsDeleted(doc))
+                {
+                    return true;
+                }
+                doc++;
+            }
+            return false;
+        }
+
+        public void Close()
+        {
+        }
+
+        public abstract bool IsDeleted(int doc);
+    }
+}
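A minimal hypothetical subclass showing the one extension point; with no deletions, Next() simply walks docIDs 0 through maxDoc-1, each reporting Freq() == 1. (The real subclass, AllTermDocs, follows and consults the segment's deletion bit vector.)

    // Illustrative only; not part of the diff.
    class NoDeletionsAllTermDocs : Mono.Lucene.Net.Index.AbstractAllTermDocs
    {
        public NoDeletionsAllTermDocs(int maxDoc) : base(maxDoc) { }

        // Nothing is ever deleted, so every docID in range is returned.
        public override bool IsDeleted(int doc) { return false; }
    }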
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/AllTermDocs.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/AllTermDocs.cs
new file mode 100644 (file)
index 0000000..ddca767
--- /dev/null
@@ -0,0 +1,42 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using BitVector = Mono.Lucene.Net.Util.BitVector;
+
+namespace Mono.Lucene.Net.Index
+{
+
+    class AllTermDocs : AbstractAllTermDocs
+       {
+               protected internal BitVector deletedDocs;
+                               
+               protected internal AllTermDocs(SegmentReader parent) : base(parent.MaxDoc())
+               {
+                       lock (parent)
+                       {
+                               this.deletedDocs = parent.deletedDocs;
+                       }
+               }
+
+        public override bool IsDeleted(int doc)
+        {
+            return deletedDocs != null && deletedDocs.Get(doc);
+        }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/BufferedDeletes.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/BufferedDeletes.cs
new file mode 100644 (file)
index 0000000..0598152
--- /dev/null
@@ -0,0 +1,202 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>Holds buffered deletes, by docID, term or query.  We
+       /// hold two instances of this class: one for the deletes
+       /// prior to the last flush, the other for deletes after
+       /// the last flush.  This way, if we need to abort
+       /// (discard all buffered docs), we can also discard the
+       /// buffered deletes while keeping the deletes done against
+       /// previously flushed segments.
+       /// </summary>
+       class BufferedDeletes
+       {
+               internal int numTerms;
+        internal System.Collections.IDictionary terms = null;
+               internal System.Collections.Hashtable queries = new System.Collections.Hashtable();
+               internal System.Collections.ArrayList docIDs = new System.Collections.ArrayList();
+               internal long bytesUsed;
+        internal  bool doTermSort;
+
+        public BufferedDeletes(bool doTermSort)
+        {
+            this.doTermSort = doTermSort;
+            if (doTermSort)
+            {
+                terms = new System.Collections.Generic.SortedDictionary<object, object>();
+            }
+            else
+            {
+                terms = new System.Collections.Hashtable();
+            }
+        }
+                
+               
+               // Number of documents a delete term applies to.
+               internal sealed class Num
+               {
+                       internal int num;
+                       
+                       internal Num(int num)
+                       {
+                               this.num = num;
+                       }
+                       
+                       internal int GetNum()
+                       {
+                               return num;
+                       }
+                       
+                       internal void  SetNum(int num)
+                       {
+                               // Only record the new number if it's greater than the
+                               // current one.  This is important because if multiple
+                               // threads are replacing the same doc at nearly the
+                               // same time, it's possible that one thread that got a
+                               // higher docID is scheduled before the other
+                               // threads.
+                               if (num > this.num)
+                                       this.num = num;
+                       }
+               }
+               
+               internal virtual int Size()
+               {
+                       // We use numTerms not terms.size() intentionally, so
+                       // that deletes by the same term multiple times "count",
+                       // ie if you ask to flush every 1000 deletes then even
+                       // dup'd terms are counted towards that 1000
+                       return numTerms + queries.Count + docIDs.Count;
+               }
+               
+               internal virtual void  Update(BufferedDeletes in_Renamed)
+               {
+                       numTerms += in_Renamed.numTerms;
+                       bytesUsed += in_Renamed.bytesUsed;
+
+                       System.Collections.ArrayList keys = new System.Collections.ArrayList(in_Renamed.terms.Keys);
+                       System.Collections.ArrayList values = new System.Collections.ArrayList(in_Renamed.terms.Values);
+                       for (int i=0; i < keys.Count; i++)
+                               terms[keys[i]] = values[i];
+
+                       keys = new System.Collections.ArrayList(in_Renamed.queries.Keys);
+                       values = new System.Collections.ArrayList(in_Renamed.queries.Values);
+                       for (int i=0; i < keys.Count; i++)
+                               queries[keys[i]] = values[i];
+
+                       docIDs.AddRange(in_Renamed.docIDs);
+                       in_Renamed.Clear();
+               }
+               
+               internal virtual void  Clear()
+               {
+                       terms.Clear();
+                       queries.Clear();
+                       docIDs.Clear();
+                       numTerms = 0;
+                       bytesUsed = 0;
+               }
+               
+               internal virtual void  AddBytesUsed(long b)
+               {
+                       bytesUsed += b;
+               }
+               
+               internal virtual bool Any()
+               {
+                       return terms.Count > 0 || docIDs.Count > 0 || queries.Count > 0;
+               }
+               
+               // Remaps all buffered deletes based on a completed
+               // merge
+               internal virtual void  Remap(MergeDocIDRemapper mapper, SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount)
+               {
+                       lock (this)
+                       {
+                               
+                               System.Collections.IDictionary newDeleteTerms;
+                               
+                               // Remap delete-by-term
+                               if (terms.Count > 0)
+                               {
+                    if (doTermSort)
+                    {
+                        newDeleteTerms = new System.Collections.Generic.SortedDictionary<object, object>();
+                    }
+                    else
+                    {
+                        newDeleteTerms = new System.Collections.Hashtable();
+                    }
+                                       System.Collections.IEnumerator iter = new System.Collections.Hashtable(terms).GetEnumerator();
+                                       while (iter.MoveNext())
+                                       {
+                                               System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iter.Current;
+                                               Num num = (Num) entry.Value;
+                                               newDeleteTerms[entry.Key] = new Num(mapper.Remap(num.GetNum()));
+                                       }
+                               }
+                               else
+                                       newDeleteTerms = null;
+                               
+                               // Remap delete-by-docID
+                               System.Collections.ArrayList newDeleteDocIDs;
+                               
+                               if (docIDs.Count > 0)
+                               {
+                                       newDeleteDocIDs = new System.Collections.ArrayList(docIDs.Count);
+                                       System.Collections.IEnumerator iter = docIDs.GetEnumerator();
+                                       while (iter.MoveNext())
+                                       {
+                                               System.Int32 num = (System.Int32) iter.Current;
+                                               newDeleteDocIDs.Add((System.Int32) mapper.Remap(num));
+                                       }
+                               }
+                               else
+                                       newDeleteDocIDs = null;
+                               
+                               // Remap delete-by-query
+                               System.Collections.Hashtable newDeleteQueries;
+                               
+                               if (queries.Count > 0)
+                               {
+                                       newDeleteQueries = new System.Collections.Hashtable(queries.Count);
+                                       System.Collections.IEnumerator iter = new System.Collections.Hashtable(queries).GetEnumerator();
+                                       while (iter.MoveNext())
+                                       {
+                                               System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iter.Current;
+                                               System.Int32 num = (System.Int32) entry.Value;
+                                               newDeleteQueries[entry.Key] = (System.Int32) mapper.Remap(num);
+                                       }
+                               }
+                               else
+                                       newDeleteQueries = null;
+                               
+                               if (newDeleteTerms != null)
+                                       terms = newDeleteTerms;
+                               if (newDeleteDocIDs != null)
+                                       docIDs = newDeleteDocIDs;
+                               if (newDeleteQueries != null)
+                                       queries = newDeleteQueries;
+                       }
+               }
+       }
+}
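The Num.SetNum rule above, isolated into a tiny illustrative class (not from this diff) to make the order-insensitivity argument concrete:

    // Keeping the maximum makes the final value independent of the order
    // in which racing threads apply their updates.
    sealed class MonotonicMax
    {
        private int value;
        public MonotonicMax(int initial) { value = initial; }
        public void Set(int candidate)
        {
            // A later-scheduled thread may carry an older (smaller) docID;
            // only ever move the recorded limit forward.
            if (candidate > value) value = candidate;
        }
        public int Get() { return value; }
    }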
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ByteBlockPool.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ByteBlockPool.cs
new file mode 100644 (file)
index 0000000..7e38a0e
--- /dev/null
@@ -0,0 +1,171 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/* Class that Posting and PostingVector use to write byte
+* streams into shared fixed-size byte[] arrays.  The idea
+* is to allocate slices of increasing lengths For
+* example, the first slice is 5 bytes, the next slice is
+* 14, etc.  We start by writing our bytes into the first
+* 5 bytes.  When we hit the end of the slice, we allocate
+* the next slice and then write the address of the new
+* slice into the last 4 bytes of the previous slice (the
+* "forwarding address").
+*
+* Each slice is filled with 0's initially, and we mark
+* the end with a non-zero byte.  This way the methods
+* that are writing into the slice don't need to record
+* its length and instead allocate a new slice once they
+* hit a non-zero byte. */
+
+using System;
+using System.Collections.Generic;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed public class ByteBlockPool
+       {
+               private void  InitBlock()
+               {
+                       byteUpto = DocumentsWriter.BYTE_BLOCK_SIZE;
+               }
+               
+               public /*internal*/ abstract class Allocator
+               {
+                       public /*internal*/ abstract void  RecycleByteBlocks(byte[][] blocks, int start, int end);
+            public /*internal*/ abstract void RecycleByteBlocks(System.Collections.ArrayList blocks);
+                       public /*internal*/ abstract byte[] GetByteBlock(bool trackAllocations);
+               }
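+               
+               // A minimal Allocator sketch (illustrative only, not part of this
+               // file; the indexer's real allocator, which also recycles blocks,
+               // lives in DocumentsWriter):
+               //
+               //   class SimpleAllocator : ByteBlockPool.Allocator
+               //   {
+               //       public override byte[] GetByteBlock(bool trackAllocations)
+               //       {
+               //           return new byte[DocumentsWriter.BYTE_BLOCK_SIZE];
+               //       }
+               //       public override void RecycleByteBlocks(byte[][] blocks, int start, int end) {}
+               //       public override void RecycleByteBlocks(System.Collections.ArrayList blocks) {}
+               //   }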
+               
+               public byte[][] buffers = new byte[10][];
+               
+               internal int bufferUpto = - 1; // Index of the current head buffer
+               public int byteUpto; // Where we are in head buffer
+               
+               public byte[] buffer; // Current head buffer
+               public int byteOffset = - DocumentsWriter.BYTE_BLOCK_SIZE; // Current head offset
+               
+               private bool trackAllocations;
+               private Allocator allocator;
+               
+               public ByteBlockPool(Allocator allocator, bool trackAllocations)
+               {
+                       InitBlock();
+                       this.allocator = allocator;
+                       this.trackAllocations = trackAllocations;
+               }
+               
+               public void  Reset()
+               {
+                       if (bufferUpto != - 1)
+                       {
+                               // We allocated at least one buffer
+                               
+                               for (int i = 0; i < bufferUpto; i++)
+                               // Fully zero fill buffers that we fully used
+                                       System.Array.Clear(buffers[i], 0, buffers[i].Length);
+                               
+                               // Partial zero fill the final buffer
+                               System.Array.Clear(buffers[bufferUpto], 0, byteUpto);
+                               
+                               if (bufferUpto > 0)
+                               // Recycle all but the first buffer
+                                       allocator.RecycleByteBlocks(buffers, 1, 1 + bufferUpto);
+                               
+                               // Re-use the first buffer
+                               bufferUpto = 0;
+                               byteUpto = 0;
+                               byteOffset = 0;
+                               buffer = buffers[0];
+                       }
+               }
+               
+               public void  NextBuffer()
+               {
+                       if (1 + bufferUpto == buffers.Length)
+                       {
+                               byte[][] newBuffers = new byte[(int) (buffers.Length * 1.5)][];
+                               Array.Copy(buffers, 0, newBuffers, 0, buffers.Length);
+                               buffers = newBuffers;
+                       }
+                       buffer = buffers[1 + bufferUpto] = allocator.GetByteBlock(trackAllocations);
+                       bufferUpto++;
+                       
+                       byteUpto = 0;
+                       byteOffset += DocumentsWriter.BYTE_BLOCK_SIZE;
+               }
+               
+               public int NewSlice(int size)
+               {
+                       if (byteUpto > DocumentsWriter.BYTE_BLOCK_SIZE - size)
+                               NextBuffer();
+                       int upto = byteUpto;
+                       byteUpto += size;
+                       buffer[byteUpto - 1] = 16; // non-zero end marker: 16 | level 0
+                       return upto;
+               }
+               
+               // Size of each slice.  These arrays should be at most 16
+               // elements (the level index is encoded with 4 bits).  The first
+               // array maps a level to the next level, capped at the last entry.
+               // The second array is the length of each slice, i.e. the first
+               // slice is 5 bytes, the next slice is 14 bytes, etc.
+               internal static readonly int[] nextLevelArray = new int[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 9};
+               internal static readonly int[] levelSizeArray = new int[]{5, 14, 20, 30, 40, 40, 80, 80, 120, 200};
+               internal static readonly int FIRST_LEVEL_SIZE = levelSizeArray[0];
+        public readonly static int FIRST_LEVEL_SIZE_For_NUnit_Test = levelSizeArray[0];
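+               
+               // Illustrative walk-through (comments only): a byte stream grows
+               // through slices of 5, 14, 20, 30, ... bytes.  A slice at a given
+               // level ends with the non-zero marker byte (16 | level); when a
+               // writer hits that marker, AllocSlice advances the stream to a
+               // slice of size levelSizeArray[nextLevelArray[level]] and overwrites
+               // the last 4 bytes of the old slice with the forwarding address of
+               // the new one.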
+               
+               public int AllocSlice(byte[] slice, int upto)
+               {
+                       
+                       int level = slice[upto] & 15;
+                       int newLevel = nextLevelArray[level];
+                       int newSize = levelSizeArray[newLevel];
+                       
+                       // Maybe allocate another block
+                       if (byteUpto > DocumentsWriter.BYTE_BLOCK_SIZE - newSize)
+                               NextBuffer();
+                       
+                       int newUpto = byteUpto;
+                       int offset = newUpto + byteOffset;
+                       byteUpto += newSize;
+                       
+                       // Copy forward the past 3 bytes (which we are about
+                       // to overwrite with the forwarding address):
+                       buffer[newUpto] = slice[upto - 3];
+                       buffer[newUpto + 1] = slice[upto - 2];
+                       buffer[newUpto + 2] = slice[upto - 1];
+                       
+                       // Write forwarding address at end of last slice:
+                       slice[upto - 3] = (byte) (SupportClass.Number.URShift(offset, 24));
+                       slice[upto - 2] = (byte) (SupportClass.Number.URShift(offset, 16));
+                       slice[upto - 1] = (byte) (SupportClass.Number.URShift(offset, 8));
+                       slice[upto] = (byte) offset;
+                       
+                       // Write new level:
+                       buffer[byteUpto - 1] = (byte) (16 | newLevel);
+                       
+                       return newUpto + 3;
+               }
+
+        public static int FIRST_LEVEL_SIZE_ForNUnit
+        {
+            get { return FIRST_LEVEL_SIZE; }
+        }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ByteSliceReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ByteSliceReader.cs
new file mode 100644 (file)
index 0000000..bae32d5
--- /dev/null
@@ -0,0 +1,183 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /* IndexInput that knows how to read the byte slices written
+       * by Posting and PostingVector.  We read the bytes in
+       * each slice until we hit the end of that slice, at which
+       * point we read the forwarding address of the next slice
+       * and then jump to it.*/
+       public sealed class ByteSliceReader:IndexInput
+       {
+               internal ByteBlockPool pool;
+               internal int bufferUpto;
+               internal byte[] buffer;
+               public int upto;
+               internal int limit;
+               internal int level;
+               public int bufferOffset;
+               
+               public int endIndex;
+               
+               public void  Init(ByteBlockPool pool, int startIndex, int endIndex)
+               {
+                       
+                       System.Diagnostics.Debug.Assert(endIndex - startIndex >= 0);
+                       System.Diagnostics.Debug.Assert(startIndex >= 0);
+                       System.Diagnostics.Debug.Assert(endIndex >= 0);
+                       
+                       this.pool = pool;
+                       this.endIndex = endIndex;
+                       
+                       level = 0;
+                       bufferUpto = startIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
+                       bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;
+                       buffer = pool.buffers[bufferUpto];
+                       upto = startIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+                       
+                       int firstSize = ByteBlockPool.levelSizeArray[0];
+                       
+                       if (startIndex + firstSize >= endIndex)
+                       {
+                               // There is only this one slice to read
+                               limit = endIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+                       }
+                       else
+                               limit = upto + firstSize - 4;
+               }
+               
+               public bool Eof()
+               {
+                       System.Diagnostics.Debug.Assert(upto + bufferOffset <= endIndex);
+                       return upto + bufferOffset == endIndex;
+               }
+               
+               public override byte ReadByte()
+               {
+                       System.Diagnostics.Debug.Assert(!Eof());
+                       System.Diagnostics.Debug.Assert(upto <= limit);
+                       if (upto == limit)
+                               NextSlice();
+                       return buffer[upto++];
+               }
+               
+               public long WriteTo(IndexOutput out_Renamed)
+               {
+                       long size = 0;
+                       while (true)
+                       {
+                               if (limit + bufferOffset == endIndex)
+                               {
+                                       System.Diagnostics.Debug.Assert(endIndex - bufferOffset >= upto);
+                                       out_Renamed.WriteBytes(buffer, upto, limit - upto);
+                                       size += limit - upto;
+                                       break;
+                               }
+                               else
+                               {
+                                       out_Renamed.WriteBytes(buffer, upto, limit - upto);
+                                       size += limit - upto;
+                                       NextSlice();
+                               }
+                       }
+                       
+                       return size;
+               }
+               
+               public void  NextSlice()
+               {
+                       
+                       // Skip to our next slice
+                       int nextIndex = ((buffer[limit] & 0xff) << 24) + ((buffer[1 + limit] & 0xff) << 16) + ((buffer[2 + limit] & 0xff) << 8) + (buffer[3 + limit] & 0xff);
+                       
+                       level = ByteBlockPool.nextLevelArray[level];
+                       int newSize = ByteBlockPool.levelSizeArray[level];
+                       
+                       bufferUpto = nextIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
+                       bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;
+                       
+                       buffer = pool.buffers[bufferUpto];
+                       upto = nextIndex & DocumentsWriter.BYTE_BLOCK_MASK;
+                       
+                       if (nextIndex + newSize >= endIndex)
+                       {
+                               // We are advancing to the final slice
+                               System.Diagnostics.Debug.Assert(endIndex - nextIndex > 0);
+                               limit = endIndex - bufferOffset;
+                       }
+                       else
+                       {
+                               // This is not the final slice (subtract 4 for the
+                               // forwarding address at the end of this new slice)
+                               limit = upto + newSize - 4;
+                       }
+               }
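+               
+               // Note (illustrative): the 4 bytes read at buffer[limit..limit+3]
+               // above form the big-endian, pool-wide start address of the next
+               // slice, as written by ByteBlockPool.AllocSlice.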
+               
+               public override void  ReadBytes(byte[] b, int offset, int len)
+               {
+                       while (len > 0)
+                       {
+                               int numLeft = limit - upto;
+                               if (numLeft < len)
+                               {
+                                       // Read entire slice
+                                       Array.Copy(buffer, upto, b, offset, numLeft);
+                                       offset += numLeft;
+                                       len -= numLeft;
+                                       NextSlice();
+                               }
+                               else
+                               {
+                                       // This slice is the last one
+                                       Array.Copy(buffer, upto, b, offset, len);
+                                       upto += len;
+                                       break;
+                               }
+                       }
+               }
+               
+               public override long GetFilePointer()
+               {
+                       throw new System.SystemException("not implemented");
+               }
+               public override long Length()
+               {
+                       throw new System.SystemException("not implemented");
+               }
+               public override void  Seek(long pos)
+               {
+                       throw new System.SystemException("not implemented");
+               }
+               public override void  Close()
+               {
+                       throw new System.SystemException("not implemented");
+               }
+               
+               override public System.Object Clone()
+               {
+            System.Diagnostics.Debug.Fail("Port issue:", "Let see if we need this ByteSliceReader.Clone()"); // {{Aroush-2.9}}
+                       return null;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ByteSliceWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ByteSliceWriter.cs
new file mode 100644 (file)
index 0000000..40dd3db
--- /dev/null
@@ -0,0 +1,100 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> Class to write byte streams into slices of shared
+       /// byte[].  This is used by DocumentsWriter to hold the
+       /// posting list for many terms in RAM.
+       /// </summary>
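+       /// <example>
+       /// A minimal usage sketch (assumes <c>pool</c> is an initialized
+       /// ByteBlockPool; the variable names are hypothetical):
+       /// <code>
+       /// ByteSliceWriter writer = new ByteSliceWriter(pool);
+       /// int upto = pool.NewSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
+       /// writer.Init(upto + pool.byteOffset);  // pool-wide address
+       /// writer.WriteVInt(42);                 // slices grow transparently
+       /// </code>
+       /// </example>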
+       
+       public sealed class ByteSliceWriter
+       {
+               
+               private byte[] slice;
+               private int upto;
+               private ByteBlockPool pool;
+               
+               internal int offset0;
+               
+               public ByteSliceWriter(ByteBlockPool pool)
+               {
+                       this.pool = pool;
+               }
+               
+               /// <summary> Set up the writer to write at the given address.</summary>
+               public void  Init(int address)
+               {
+                       slice = pool.buffers[address >> DocumentsWriter.BYTE_BLOCK_SHIFT];
+                       System.Diagnostics.Debug.Assert(slice != null);
+                       upto = address & DocumentsWriter.BYTE_BLOCK_MASK;
+                       offset0 = address;
+                       System.Diagnostics.Debug.Assert(upto < slice.Length);
+               }
+               
+               /// <summary>Write a byte into the byte slice stream.</summary>
+               public void  WriteByte(byte b)
+               {
+                       System.Diagnostics.Debug.Assert(slice != null);
+                       if (slice[upto] != 0)
+                       {
+                               upto = pool.AllocSlice(slice, upto);
+                               slice = pool.buffer;
+                               offset0 = pool.byteOffset;
+                               System.Diagnostics.Debug.Assert(slice != null);
+                       }
+                       slice[upto++] = b;
+                       System.Diagnostics.Debug.Assert(upto != slice.Length);
+               }
+               
+               public void  WriteBytes(byte[] b, int offset, int len)
+               {
+                       int offsetEnd = offset + len;
+                       while (offset < offsetEnd)
+                       {
+                               if (slice[upto] != 0)
+                               {
+                                       // End marker
+                                       upto = pool.AllocSlice(slice, upto);
+                                       slice = pool.buffer;
+                                       offset0 = pool.byteOffset;
+                               }
+                               
+                               slice[upto++] = b[offset++];
+                               System.Diagnostics.Debug.Assert(upto != slice.Length);
+                       }
+               }
+               
+               public int GetAddress()
+               {
+                       return upto + (offset0 & DocumentsWriter.BYTE_BLOCK_NOT_MASK);
+               }
+               
+               public void  WriteVInt(int i)
+               {
+                       while ((i & ~ 0x7F) != 0)
+                       {
+                               WriteByte((byte) ((i & 0x7f) | 0x80));
+                               i = SupportClass.Number.URShift(i, 7);
+                       }
+                       WriteByte((byte) i);
+               }
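+               
+               // Worked example (comments only): WriteVInt(300) emits 0xAC 0x02,
+               // i.e. the low 7 bits (44) with the continuation bit set, then the
+               // remaining bits (2) in a final byte whose high bit is clear.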
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/CharBlockPool.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/CharBlockPool.cs
new file mode 100644 (file)
index 0000000..e032b55
--- /dev/null
@@ -0,0 +1,69 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class CharBlockPool
+       {
+               private void  InitBlock()
+               {
+                       charUpto = DocumentsWriter.CHAR_BLOCK_SIZE;
+               }
+               
+               public char[][] buffers = new char[10][];
+               internal int numBuffer;
+               
+               internal int bufferUpto = - 1; // Index of the current head buffer
+               public int charUpto; // Where we are in head buffer
+               
+               public char[] buffer; // Current head buffer
+               public int charOffset = - DocumentsWriter.CHAR_BLOCK_SIZE; // Current head offset
+               private DocumentsWriter docWriter;
+               
+               public CharBlockPool(DocumentsWriter docWriter)
+               {
+                       InitBlock();
+                       this.docWriter = docWriter;
+               }
+               
+               public void  Reset()
+               {
+                       docWriter.RecycleCharBlocks(buffers, 1 + bufferUpto);
+                       bufferUpto = - 1;
+                       charUpto = DocumentsWriter.CHAR_BLOCK_SIZE;
+                       charOffset = - DocumentsWriter.CHAR_BLOCK_SIZE;
+               }
+               
+               public void  NextBuffer()
+               {
+                       if (1 + bufferUpto == buffers.Length)
+                       {
+                               char[][] newBuffers = new char[(int) (buffers.Length * 1.5)][];
+                               Array.Copy(buffers, 0, newBuffers, 0, buffers.Length);
+                               buffers = newBuffers;
+                       }
+                       buffer = buffers[1 + bufferUpto] = docWriter.GetCharBlock();
+                       bufferUpto++;
+                       
+                       charUpto = 0;
+                       charOffset += DocumentsWriter.CHAR_BLOCK_SIZE;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/CheckIndex.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/CheckIndex.cs
new file mode 100644 (file)
index 0000000..ea1d491
--- /dev/null
@@ -0,0 +1,1052 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using AbstractField = Mono.Lucene.Net.Documents.AbstractField;
+using Document = Mono.Lucene.Net.Documents.Document;
+using Directory = Mono.Lucene.Net.Store.Directory;
+using FSDirectory = Mono.Lucene.Net.Store.FSDirectory;
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> Basic tool and API to check the health of an index and
+       /// write a new segments file that removes reference to
+       /// problematic segments.
+       /// 
+       /// <p/>As this tool checks every byte in the index, on a large
+       /// index it can take quite a long time to run.
+       /// 
+       /// <p/><b>WARNING</b>: this tool and API are new and
+       /// experimental and are subject to sudden change in the
+       /// next release.  Please make a complete backup of your
+       /// index before using this to fix your index!
+       /// </summary>
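+       /// <example>
+       /// Typical usage, as a sketch (<c>dir</c> is an already-open Directory):
+       /// <code>
+       /// CheckIndex checker = new CheckIndex(dir);
+       /// CheckIndex.Status status = checker.CheckIndex_Renamed_Method();
+       /// if (!status.clean)
+       ///     checker.FixIndex(status); // drops references to the bad segments
+       /// </code>
+       /// </example>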
+       public class CheckIndex
+       {
+               
+               /// <summary>Default PrintStream for all CheckIndex instances.</summary>
+               /// <deprecated> Use {@link #setInfoStream} per instance,
+               /// instead. 
+               /// </deprecated>
+        [Obsolete("Use SetInfoStream per instance,instead.")]
+               public static System.IO.StreamWriter out_Renamed = null;
+               
+               private System.IO.StreamWriter infoStream;
+               private Directory dir;
+               
+               /// <summary> Returned from {@link #CheckIndex()} detailing the health and status of the index.
+               /// 
+               /// <p/><b>WARNING</b>: this API is new and experimental and is
+               /// subject to sudden change in the next release.
+               /// 
+               /// </summary>
+               
+               public class Status
+               {
+                       
+                       /// <summary>True if no problems were found with the index. </summary>
+                       public bool clean;
+                       
+                       /// <summary>True if we were unable to locate and load the segments_N file. </summary>
+                       public bool missingSegments;
+                       
+                       /// <summary>True if we were unable to open the segments_N file. </summary>
+                       public bool cantOpenSegments;
+                       
+                       /// <summary>True if we were unable to read the version number from segments_N file. </summary>
+                       public bool missingSegmentVersion;
+                       
+                       /// <summary>Name of latest segments_N file in the index. </summary>
+                       public System.String segmentsFileName;
+                       
+                       /// <summary>Number of segments in the index. </summary>
+                       public int numSegments;
+                       
+                       /// <summary>String description of the version of the index. </summary>
+                       public System.String segmentFormat;
+                       
+                       /// <summary>Empty unless you passed a specific list of segments to check as the optional third argument.</summary>
+                       /// <seealso cref="CheckIndex.CheckIndex(List)">
+                       /// </seealso>
+                       public System.Collections.IList segmentsChecked = new System.Collections.ArrayList();
+                       
+                       /// <summary>True if the index was created with a newer version of Lucene than the CheckIndex tool. </summary>
+                       public bool toolOutOfDate;
+                       
+                       /// <summary>List of {@link SegmentInfoStatus} instances, detailing status of each segment. </summary>
+                       public System.Collections.IList segmentInfos = new System.Collections.ArrayList();
+                       
+                       /// <summary>Directory index is in. </summary>
+                       public Directory dir;
+                       
+                       /// <summary> SegmentInfos instance containing only segments that
+                       /// had no problems (this is used with the {@link CheckIndex#fixIndex} 
+                       /// method to repair the index).
+                       /// </summary>
+                       internal SegmentInfos newSegments;
+                       
+                       /// <summary>How many documents will be lost to bad segments. </summary>
+                       public int totLoseDocCount;
+                       
+                       /// <summary>How many bad segments were found. </summary>
+                       public int numBadSegments;
+                       
+                       /// <summary>True if we checked only specific segments ({@link
+                       /// #CheckIndex(List)} was called with a non-null
+                       /// argument). 
+                       /// </summary>
+                       public bool partial;
+                       
+                       /// <summary>Holds the userData of the last commit in the index </summary>
+            public System.Collections.Generic.IDictionary<string, string> userData;
+                       
+                       /// <summary>Holds the status of each segment in the index.
+                       /// See {@link #segmentInfos}.
+                       /// 
+                       /// <p/><b>WARNING</b>: this API is new and experimental and is
+                               /// subject to sudden change in the next release.
+                       /// </summary>
+                       public class SegmentInfoStatus
+                       {
+                               /// <summary>Name of the segment. </summary>
+                               public System.String name;
+                               
+                               /// <summary>Document count (does not take deletions into account). </summary>
+                               public int docCount;
+                               
+                               /// <summary>True if segment is compound file format. </summary>
+                               public bool compound;
+                               
+                               /// <summary>Number of files referenced by this segment. </summary>
+                               public int numFiles;
+                               
+                               /// <summary>Net size (MB) of the files referenced by this
+                               /// segment. 
+                               /// </summary>
+                               public double sizeMB;
+                               
+                               /// <summary>Doc store offset, if this segment shares the doc
+                               /// store files (stored fields and term vectors) with
+                               /// other segments.  This is -1 if it does not share. 
+                               /// </summary>
+                               public int docStoreOffset = - 1;
+                               
+                               /// <summary>String of the shared doc store segment, or null if
+                               /// this segment does not share the doc store files. 
+                               /// </summary>
+                               public System.String docStoreSegment;
+                               
+                               /// <summary>True if the shared doc store files are compound file
+                               /// format. 
+                               /// </summary>
+                               public bool docStoreCompoundFile;
+                               
+                               /// <summary>True if this segment has pending deletions. </summary>
+                               public bool hasDeletions;
+                               
+                               /// <summary>Name of the current deletions file name. </summary>
+                               public System.String deletionsFileName;
+                               
+                               /// <summary>Number of deleted documents. </summary>
+                               public int numDeleted;
+                               
+                               /// <summary>True if we were able to open a SegmentReader on this
+                               /// segment. 
+                               /// </summary>
+                               public bool openReaderPassed;
+                               
+                               /// <summary>Number of fields in this segment. </summary>
+                               internal int numFields;
+                               
+                               /// <summary>True if at least one of the fields in this segment
+                               /// does not omitTermFreqAndPositions.
+                               /// </summary>
+                               /// <seealso cref="AbstractField.setOmitTermFreqAndPositions">
+                               /// </seealso>
+                               public bool hasProx;
+
+                /// <summary>Map&lt;String, String&gt; that includes certain
+                               /// debugging details that IndexWriter records into
+                               /// each segment it creates 
+                               /// </summary>
+                public System.Collections.Generic.IDictionary<string, string> diagnostics;
+                               
+                               /// <summary>Status for testing of field norms (null if field norms could not be tested). </summary>
+                               public FieldNormStatus fieldNormStatus;
+                               
+                               /// <summary>Status for testing of indexed terms (null if indexed terms could not be tested). </summary>
+                               public TermIndexStatus termIndexStatus;
+                               
+                               /// <summary>Status for testing of stored fields (null if stored fields could not be tested). </summary>
+                               public StoredFieldStatus storedFieldStatus;
+                               
+                               /// <summary>Status for testing of term vectors (null if term vectors could not be tested). </summary>
+                               public TermVectorStatus termVectorStatus;
+                       }
+                       
+                       /// <summary> Status from testing field norms.</summary>
+                       public sealed class FieldNormStatus
+                       {
+                               /// <summary>Number of fields successfully tested </summary>
+                               public long totFields = 0L;
+                               
+                               /// <summary>Exception thrown during term index test (null on success) </summary>
+                               public System.Exception error = null;
+                       }
+                       
+                       /// <summary> Status from testing term index.</summary>
+                       public sealed class TermIndexStatus
+                       {
+                               /// <summary>Total term count </summary>
+                               public long termCount = 0L;
+                               
+                               /// <summary>Total frequency across all terms. </summary>
+                               public long totFreq = 0L;
+                               
+                               /// <summary>Total number of positions. </summary>
+                               public long totPos = 0L;
+                               
+                               /// <summary>Exception thrown during term index test (null on success) </summary>
+                               public System.Exception error = null;
+                       }
+                       
+                       /// <summary> Status from testing stored fields.</summary>
+                       public sealed class StoredFieldStatus
+                       {
+                               
+                               /// <summary>Number of documents tested. </summary>
+                               public int docCount = 0;
+                               
+                               /// <summary>Total number of stored fields tested. </summary>
+                               public long totFields = 0;
+                               
+                               /// <summary>Exception thrown during stored fields test (null on success) </summary>
+                               public System.Exception error = null;
+                       }
+                       
+                       /// <summary> Status from testing term vectors.</summary>
+                       public sealed class TermVectorStatus
+                       {
+                               
+                               /// <summary>Number of documents tested. </summary>
+                               public int docCount = 0;
+                               
+                               /// <summary>Total number of term vectors tested. </summary>
+                               public long totVectors = 0;
+                               
+                               /// <summary>Exception thrown during term vector test (null on success) </summary>
+                               public System.Exception error = null;
+                       }
+               }
+               
+               /// <summary>Create a new CheckIndex on the directory. </summary>
+               public CheckIndex(Directory dir)
+               {
+                       this.dir = dir;
+                       infoStream = out_Renamed;
+               }
+               
+               /// <summary>Set infoStream where messages should go.  If null, no
+               /// messages are printed.
+               /// </summary>
+               public virtual void  SetInfoStream(System.IO.StreamWriter out_Renamed)
+               {
+                       infoStream = out_Renamed;
+               }
+               
+               private void  Msg(System.String msg)
+               {
+                       if (infoStream != null)
+                               infoStream.WriteLine(msg);
+               }
+               
+               private class MySegmentTermDocs:SegmentTermDocs
+               {
+                       
+                       internal int delCount;
+                       
+                       internal MySegmentTermDocs(SegmentReader p):base(p)
+                       {
+                       }
+                       
+                       public override void  Seek(Term term)
+                       {
+                               base.Seek(term);
+                               delCount = 0;
+                       }
+                       
+                       protected internal override void  SkippingDoc()
+                       {
+                               delCount++;
+                       }
+               }
+               
+               /// <summary>Returns true if index is clean, else false. </summary>
+               /// <deprecated> Please instantiate a CheckIndex and then use {@link #CheckIndex()} instead 
+               /// </deprecated>
+        [Obsolete("Please instantiate a CheckIndex and then use CheckIndex() instead")]
+               public static bool Check(Directory dir, bool doFix)
+               {
+                       return Check(dir, doFix, null);
+               }
+               
+               /// <summary>Returns true if index is clean, else false.</summary>
+               /// <deprecated> Please instantiate a CheckIndex and then use {@link #CheckIndex(List)} instead 
+               /// </deprecated>
+        [Obsolete("Please instantiate a CheckIndex and then use CheckIndex(List) instead")]
+               public static bool Check(Directory dir, bool doFix, System.Collections.IList onlySegments)
+               {
+                       CheckIndex checker = new CheckIndex(dir);
+                       Status status = checker.CheckIndex_Renamed_Method(onlySegments);
+                       if (doFix && !status.clean)
+                               checker.FixIndex(status);
+                       
+                       return status.clean;
+               }
+               
+               /// <summary>Returns a {@link Status} instance detailing
+               /// the state of the index.
+               /// 
+               /// <p/>As this method checks every byte in the index, on a large
+               /// index it can take quite a long time to run.
+               /// 
+               /// <p/><b>WARNING</b>: make sure
+               /// you only call this when the index is not opened by any
+               /// writer. 
+               /// </summary>
+               public virtual Status CheckIndex_Renamed_Method()
+               {
+                       return CheckIndex_Renamed_Method(null);
+               }
+               
+               /// <summary>Returns a {@link Status} instance detailing
+               /// the state of the index.
+               /// 
+               /// </summary>
+               /// <param name="onlySegments">list of specific segment names to check
+               /// 
+               /// <p/>As this method checks every byte in the specified
+               /// segments, on a large index it can take quite a long
+               /// time to run.
+               /// 
+               /// <p/><b>WARNING</b>: make sure
+               /// you only call this when the index is not opened by any
+               /// writer. 
+               /// </param>
+               public virtual Status CheckIndex_Renamed_Method(System.Collections.IList onlySegments)
+               {
+            System.Globalization.NumberFormatInfo nf = System.Globalization.CultureInfo.CurrentCulture.NumberFormat;
+                       SegmentInfos sis = new SegmentInfos();
+                       Status result = new Status();
+                       result.dir = dir;
+                       try
+                       {
+                               sis.Read(dir);
+                       }
+                       catch (System.Exception t)
+                       {
+                               Msg("ERROR: could not read any segments file in directory");
+                               result.missingSegments = true;
+                               if (infoStream != null)
+                                       infoStream.WriteLine(t.StackTrace);
+                               return result;
+                       }
+                       
+                       int numSegments = sis.Count;
+                       System.String segmentsFileName = sis.GetCurrentSegmentFileName();
+                       IndexInput input = null;
+                       try
+                       {
+                               input = dir.OpenInput(segmentsFileName);
+                       }
+                       catch (System.Exception t)
+                       {
+                               Msg("ERROR: could not open segments file in directory");
+                               if (infoStream != null)
+                                       infoStream.WriteLine(t.StackTrace);
+                               result.cantOpenSegments = true;
+                               return result;
+                       }
+                       int format = 0;
+                       try
+                       {
+                               format = input.ReadInt();
+                       }
+                       catch (System.Exception t)
+                       {
+                               Msg("ERROR: could not read segment file version in directory");
+                               if (infoStream != null)
+                                       infoStream.WriteLine(t.StackTrace);
+                               result.missingSegmentVersion = true;
+                               return result;
+                       }
+                       finally
+                       {
+                               if (input != null)
+                                       input.Close();
+                       }
+                       
+                       System.String sFormat = "";
+                       bool skip = false;
+                       
+                       if (format == SegmentInfos.FORMAT)
+                               sFormat = "FORMAT [Lucene Pre-2.1]";
+                       else if (format == SegmentInfos.FORMAT_LOCKLESS)
+                               sFormat = "FORMAT_LOCKLESS [Lucene 2.1]";
+                       else if (format == SegmentInfos.FORMAT_SINGLE_NORM_FILE)
+                               sFormat = "FORMAT_SINGLE_NORM_FILE [Lucene 2.2]";
+                       else if (format == SegmentInfos.FORMAT_SHARED_DOC_STORE)
+                               sFormat = "FORMAT_SHARED_DOC_STORE [Lucene 2.3]";
+                       else
+                       {
+                               if (format == SegmentInfos.FORMAT_CHECKSUM)
+                                       sFormat = "FORMAT_CHECKSUM [Lucene 2.4]";
+                               else if (format == SegmentInfos.FORMAT_DEL_COUNT)
+                                       sFormat = "FORMAT_DEL_COUNT [Lucene 2.4]";
+                               else if (format == SegmentInfos.FORMAT_HAS_PROX)
+                                       sFormat = "FORMAT_HAS_PROX [Lucene 2.4]";
+                               else if (format == SegmentInfos.FORMAT_USER_DATA)
+                                       sFormat = "FORMAT_USER_DATA [Lucene 2.9]";
+                               else if (format == SegmentInfos.FORMAT_DIAGNOSTICS)
+                                       sFormat = "FORMAT_DIAGNOSTICS [Lucene 2.9]";
+                               else if (format < SegmentInfos.CURRENT_FORMAT)
+                               {
+                                       sFormat = "int=" + format + " [newer version of Lucene than this tool]";
+                                       skip = true;
+                               }
+                               else
+                               {
+                                       sFormat = format + " [Lucene 1.3 or prior]";
+                               }
+                       }
+                       
+                       result.segmentsFileName = segmentsFileName;
+                       result.numSegments = numSegments;
+                       result.segmentFormat = sFormat;
+                       result.userData = sis.GetUserData();
+                       System.String userDataString;
+                       if (sis.GetUserData().Count > 0)
+                       {
+                               userDataString = " userData=" + SupportClass.CollectionsHelper.CollectionToString(sis.GetUserData());
+                       }
+                       else
+                       {
+                               userDataString = "";
+                       }
+                       
+                       Msg("Segments file=" + segmentsFileName + " numSegments=" + numSegments + " version=" + sFormat + userDataString);
+                       
+                       if (onlySegments != null)
+                       {
+                               result.partial = true;
+                               if (infoStream != null)
+                                       infoStream.Write("\nChecking only these segments:");
+                               System.Collections.IEnumerator it = onlySegments.GetEnumerator();
+                               while (it.MoveNext())
+                               {
+                                       if (infoStream != null)
+                                       {
+                                               infoStream.Write(" " + it.Current);
+                                       }
+                                       result.segmentsChecked.Add(it.Current);
+                               }
+                               Msg(":");
+                       }
+                       
+                       if (skip)
+                       {
+                               Msg("\nERROR: this index appears to be created by a newer version of Lucene than this tool was compiled on; please re-compile this tool on the matching version of Lucene; exiting");
+                               result.toolOutOfDate = true;
+                               return result;
+                       }
+                       
+                       
+                       result.newSegments = (SegmentInfos) sis.Clone();
+                       result.newSegments.Clear();
+                       
+                       for (int i = 0; i < numSegments; i++)
+                       {
+                               SegmentInfo info = sis.Info(i);
+                               if (onlySegments != null && !onlySegments.Contains(info.name))
+                                       continue;
+                               Status.SegmentInfoStatus segInfoStat = new Status.SegmentInfoStatus();
+                               result.segmentInfos.Add(segInfoStat);
+                               Msg("  " + (1 + i) + " of " + numSegments + ": name=" + info.name + " docCount=" + info.docCount);
+                               segInfoStat.name = info.name;
+                               segInfoStat.docCount = info.docCount;
+                               
+                               int toLoseDocCount = info.docCount;
+                               
+                               SegmentReader reader = null;
+                               
+                               try
+                               {
+                                       Msg("    compound=" + info.GetUseCompoundFile());
+                                       segInfoStat.compound = info.GetUseCompoundFile();
+                                       Msg("    hasProx=" + info.GetHasProx());
+                                       segInfoStat.hasProx = info.GetHasProx();
+                                       Msg("    numFiles=" + info.Files().Count);
+                                       segInfoStat.numFiles = info.Files().Count;
+                                       Msg(System.String.Format(nf, "    size (MB)={0:f}", new System.Object[] { (info.SizeInBytes() / (1024.0 * 1024.0)) }));
+                                       segInfoStat.sizeMB = info.SizeInBytes() / (1024.0 * 1024.0);
+                    System.Collections.Generic.IDictionary<string, string> diagnostics = info.GetDiagnostics();
+                                       segInfoStat.diagnostics = diagnostics;
+                                       if (diagnostics.Count > 0)
+                                       {
+                                               Msg("    diagnostics = " + SupportClass.CollectionsHelper.CollectionToString(diagnostics));
+                                       }
+                                       
+                                       int docStoreOffset = info.GetDocStoreOffset();
+                                       if (docStoreOffset != - 1)
+                                       {
+                                               Msg("    docStoreOffset=" + docStoreOffset);
+                                               segInfoStat.docStoreOffset = docStoreOffset;
+                                               Msg("    docStoreSegment=" + info.GetDocStoreSegment());
+                                               segInfoStat.docStoreSegment = info.GetDocStoreSegment();
+                                               Msg("    docStoreIsCompoundFile=" + info.GetDocStoreIsCompoundFile());
+                                               segInfoStat.docStoreCompoundFile = info.GetDocStoreIsCompoundFile();
+                                       }
+                                       System.String delFileName = info.GetDelFileName();
+                                       if (delFileName == null)
+                                       {
+                                               Msg("    no deletions");
+                                               segInfoStat.hasDeletions = false;
+                                       }
+                                       else
+                                       {
+                                               Msg("    has deletions [delFileName=" + delFileName + "]");
+                                               segInfoStat.hasDeletions = true;
+                                               segInfoStat.deletionsFileName = delFileName;
+                                       }
+                                       if (infoStream != null)
+                                               infoStream.Write("    test: open reader.........");
+                                       reader = SegmentReader.Get(info);
+                                       
+                                       segInfoStat.openReaderPassed = true;
+                                       
+                                       int numDocs = reader.NumDocs();
+                                       toLoseDocCount = numDocs;
+                                       if (reader.HasDeletions())
+                                       {
+                                               if (reader.deletedDocs.Count() != info.GetDelCount())
+                                               {
+                                                       throw new System.SystemException("delete count mismatch: info=" + info.GetDelCount() + " vs deletedDocs.count()=" + reader.deletedDocs.Count());
+                                               }
+                                               if (reader.deletedDocs.Count() > reader.MaxDoc())
+                                               {
+                                                       throw new System.SystemException("too many deleted docs: maxDoc()=" + reader.MaxDoc() + " vs deletedDocs.count()=" + reader.deletedDocs.Count());
+                                               }
+                                               if (info.docCount - numDocs != info.GetDelCount())
+                                               {
+                                                       throw new System.SystemException("delete count mismatch: info=" + info.GetDelCount() + " vs reader=" + (info.docCount - numDocs));
+                                               }
+                                               segInfoStat.numDeleted = info.docCount - numDocs;
+                                               Msg("OK [" + (segInfoStat.numDeleted) + " deleted docs]");
+                                       }
+                                       else
+                                       {
+                                               if (info.GetDelCount() != 0)
+                                               {
+                                                       throw new System.SystemException("delete count mismatch: info=" + info.GetDelCount() + " vs reader=" + (info.docCount - numDocs));
+                                               }
+                                               Msg("OK");
+                                       }
+                                       if (reader.MaxDoc() != info.docCount)
+                                               throw new System.SystemException("SegmentReader.maxDoc() " + reader.MaxDoc() + " != SegmentInfos.docCount " + info.docCount);
+                                       
+                                       // Test getFieldNames()
+                                       if (infoStream != null)
+                                       {
+                                               infoStream.Write("    test: fields..............");
+                                       }
+                    System.Collections.Generic.ICollection<string> fieldNames = reader.GetFieldNames(IndexReader.FieldOption.ALL);
+                                       Msg("OK [" + fieldNames.Count + " fields]");
+                                       segInfoStat.numFields = fieldNames.Count;
+                                       
+                                       // Test Field Norms
+                                       segInfoStat.fieldNormStatus = TestFieldNorms(fieldNames, reader);
+                                       
+                                       // Test the Term Index
+                                       segInfoStat.termIndexStatus = TestTermIndex(info, reader);
+                                       
+                                       // Test Stored Fields
+                                       segInfoStat.storedFieldStatus = TestStoredFields(info, reader, nf);
+                                       
+                                       // Test Term Vectors
+                                       segInfoStat.termVectorStatus = TestTermVectors(info, reader, nf);
+                                       
+					// Surface the first failed sub-test as an exception so that the
+					// catch block below counts this segment as broken.
+                                       if (segInfoStat.fieldNormStatus.error != null)
+                                       {
+                                               throw new System.SystemException("Field Norm test failed");
+                                       }
+                                       else if (segInfoStat.termIndexStatus.error != null)
+                                       {
+                                               throw new System.SystemException("Term Index test failed");
+                                       }
+                                       else if (segInfoStat.storedFieldStatus.error != null)
+                                       {
+                                               throw new System.SystemException("Stored Field test failed");
+                                       }
+                                       else if (segInfoStat.termVectorStatus.error != null)
+                                       {
+                                               throw new System.SystemException("Term Vector test failed");
+                                       }
+                                       
+                                       Msg("");
+                               }
+                               catch (System.Exception t)
+                               {
+                                       Msg("FAILED");
+					System.String comment = "fixIndex() would remove reference to this segment";
+                                       Msg("    WARNING: " + comment + "; full exception:");
+                                       if (infoStream != null)
+                                               infoStream.WriteLine(t.StackTrace);
+                                       Msg("");
+                                       result.totLoseDocCount += toLoseDocCount;
+                                       result.numBadSegments++;
+                                       continue;
+                               }
+                               finally
+                               {
+                                       if (reader != null)
+                                               reader.Close();
+                               }
+                               
+                               // Keeper
+                               result.newSegments.Add(info.Clone());
+                       }
+                       
+                       if (0 == result.numBadSegments)
+                       {
+                               result.clean = true;
+                               Msg("No problems were detected with this index.\n");
+                       }
+                       else
+                               Msg("WARNING: " + result.numBadSegments + " broken segments (containing " + result.totLoseDocCount + " documents) detected");
+                       
+                       return result;
+               }
+               
+               /// <summary> Test field norms.</summary>
+        private Status.FieldNormStatus TestFieldNorms(System.Collections.Generic.ICollection<string> fieldNames, SegmentReader reader)
+               {
+                       Status.FieldNormStatus status = new Status.FieldNormStatus();
+                       
+                       try
+                       {
+                               // Test Field Norms
+                               if (infoStream != null)
+                               {
+                                       infoStream.Write("    test: field norms.........");
+                               }
+                               System.Collections.IEnumerator it = fieldNames.GetEnumerator();
+                               byte[] b = new byte[reader.MaxDoc()];
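+				// One norm byte per document; the same buffer is reused for every field.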
+                               while (it.MoveNext())
+                               {
+                                       System.String fieldName = (System.String) it.Current;
+                    if (reader.HasNorms(fieldName))
+                    {
+                        reader.Norms(fieldName, b, 0);
+                        ++status.totFields;
+                    }
+                               }
+                               
+                               Msg("OK [" + status.totFields + " fields]");
+                       }
+                       catch (System.Exception e)
+                       {
+				Msg("ERROR [" + e.Message + "]");
+                               status.error = e;
+                               if (infoStream != null)
+                               {
+                                       infoStream.WriteLine(e.StackTrace);
+                               }
+                       }
+                       
+                       return status;
+               }
+               
+               /// <summary> Test the term index.</summary>
+               private Status.TermIndexStatus TestTermIndex(SegmentInfo info, SegmentReader reader)
+               {
+                       Status.TermIndexStatus status = new Status.TermIndexStatus();
+                       
+                       try
+                       {
+                               if (infoStream != null)
+                               {
+                                       infoStream.Write("    test: terms, freq, prox...");
+                               }
+                               
+                               TermEnum termEnum = reader.Terms();
+                               TermPositions termPositions = reader.TermPositions();
+                               
+                               // Used only to count up # deleted docs for this term
+                               MySegmentTermDocs myTermDocs = new MySegmentTermDocs(reader);
+                               
+                               int maxDoc = reader.MaxDoc();
+                               
+                               while (termEnum.Next())
+                               {
+                                       status.termCount++;
+                                       Term term = termEnum.Term();
+                                       int docFreq = termEnum.DocFreq();
+                                       termPositions.Seek(term);
+                                       int lastDoc = - 1;
+                                       int freq0 = 0;
+                                       status.totFreq += docFreq;
+                                       while (termPositions.Next())
+                                       {
+                                               freq0++;
+                                               int doc = termPositions.Doc();
+                                               int freq = termPositions.Freq();
+                                               if (doc <= lastDoc)
+                                               {
+                                                       throw new System.SystemException("term " + term + ": doc " + doc + " <= lastDoc " + lastDoc);
+                                               }
+                                               if (doc >= maxDoc)
+                                               {
+                                                       throw new System.SystemException("term " + term + ": doc " + doc + " >= maxDoc " + maxDoc);
+                                               }
+                                               
+                                               lastDoc = doc;
+                                               if (freq <= 0)
+                                               {
+                                                       throw new System.SystemException("term " + term + ": doc " + doc + ": freq " + freq + " is out of bounds");
+                                               }
+                                               
+                                               int lastPos = - 1;
+                                               status.totPos += freq;
+                                               for (int j = 0; j < freq; j++)
+                                               {
+                                                       int pos = termPositions.NextPosition();
+                                                       if (pos < - 1)
+                                                       {
+                                                               throw new System.SystemException("term " + term + ": doc " + doc + ": pos " + pos + " is out of bounds");
+                                                       }
+                                                       if (pos < lastPos)
+                                                       {
+                                                               throw new System.SystemException("term " + term + ": doc " + doc + ": pos " + pos + " < lastPos " + lastPos);
+                                                       }
+                                               }
+                                       }
+                                       
+                                       // Now count how many deleted docs occurred in
+                                       // this term:
+                                       int delCount;
+                                       if (reader.HasDeletions())
+                                       {
+                                               myTermDocs.Seek(term);
+                                               while (myTermDocs.Next())
+                                               {
+                                               }
+                                               delCount = myTermDocs.delCount;
+                                       }
+                                       else
+                                       {
+                                               delCount = 0;
+                                       }
+                                       
+                                       if (freq0 + delCount != docFreq)
+                                       {
+                                               throw new System.SystemException("term " + term + " docFreq=" + docFreq + " != num docs seen " + freq0 + " + num docs deleted " + delCount);
+                                       }
+                               }
+                               
+                               Msg("OK [" + status.termCount + " terms; " + status.totFreq + " terms/docs pairs; " + status.totPos + " tokens]");
+                       }
+                       catch (System.Exception e)
+                       {
+				Msg("ERROR [" + e.Message + "]");
+                               status.error = e;
+                               if (infoStream != null)
+                               {
+                                       infoStream.WriteLine(e.StackTrace);
+                               }
+                       }
+                       
+                       return status;
+               }
+               
+               /// <summary> Test stored fields for a segment.</summary>
+               private Status.StoredFieldStatus TestStoredFields(SegmentInfo info, SegmentReader reader, System.Globalization.NumberFormatInfo format)
+               {
+                       Status.StoredFieldStatus status = new Status.StoredFieldStatus();
+                       
+                       try
+                       {
+                               if (infoStream != null)
+                               {
+                                       infoStream.Write("    test: stored fields.......");
+                               }
+                               
+                               // Scan stored fields for all documents
+                               for (int j = 0; j < info.docCount; ++j)
+                               {
+                                       if (!reader.IsDeleted(j))
+                                       {
+                                               status.docCount++;
+                                               Document doc = reader.Document(j);
+                                               status.totFields += doc.GetFields().Count;
+                                       }
+                               }
+                               
+                               // Validate docCount
+                               if (status.docCount != reader.NumDocs())
+                               {
+					throw new System.SystemException("docCount=" + reader.NumDocs() + " but saw " + status.docCount + " undeleted docs");
+                               }
+                               
+                Msg(string.Format(format, "OK [{0:d} total field count; avg {1:f} fields per doc]", new object[] { status.totFields, (((float) status.totFields) / status.docCount) }));
+            }
+                       catch (System.Exception e)
+                       {
+				Msg("ERROR [" + e.Message + "]");
+                               status.error = e;
+                               if (infoStream != null)
+                               {
+                                       infoStream.WriteLine(e.StackTrace);
+                               }
+                       }
+                       
+                       return status;
+               }
+               
+               /// <summary> Test term vectors for a segment.</summary>
+        private Status.TermVectorStatus TestTermVectors(SegmentInfo info, SegmentReader reader, System.Globalization.NumberFormatInfo format)
+               {
+                       Status.TermVectorStatus status = new Status.TermVectorStatus();
+                       
+                       try
+                       {
+                               if (infoStream != null)
+                               {
+                                       infoStream.Write("    test: term vectors........");
+                               }
+                               
+                               for (int j = 0; j < info.docCount; ++j)
+                               {
+                                       if (!reader.IsDeleted(j))
+                                       {
+                                               status.docCount++;
+                                               TermFreqVector[] tfv = reader.GetTermFreqVectors(j);
+                                               if (tfv != null)
+                                               {
+                                                       status.totVectors += tfv.Length;
+                                               }
+                                       }
+                               }
+                               
+                Msg(System.String.Format(format, "OK [{0:d} total vector count; avg {1:f} term/freq vector fields per doc]", new object[] { status.totVectors, (((float) status.totVectors) / status.docCount) }));
+            }
+                       catch (System.Exception e)
+                       {
+				Msg("ERROR [" + e.Message + "]");
+                               status.error = e;
+                               if (infoStream != null)
+                               {
+                                       infoStream.WriteLine(e.StackTrace);
+                               }
+                       }
+                       
+                       return status;
+               }
+               
+               /// <summary>Repairs the index using previously returned result
+               /// from {@link #checkIndex}.  Note that this does not
+               /// remove any of the unreferenced files after it's done;
+               /// you must separately open an {@link IndexWriter}, which
+               /// deletes unreferenced files when it's created.
+               /// 
+               /// <p/><b>WARNING</b>: this writes a
+               /// new segments file into the index, effectively removing
+               /// all documents in broken segments from the index.
+               /// BE CAREFUL.
+               /// 
+               /// <p/><b>WARNING</b>: Make sure you only call this when the
+		/// index is not opened by any writer.
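+		/// 
+		/// <p/>A minimal check-then-fix sketch (the index path is hypothetical;
+		/// the rest uses this class's own API as shown elsewhere in this file):
+		/// <pre>
+		/// CheckIndex checker = new CheckIndex(FSDirectory.Open(new System.IO.FileInfo("/tmp/index")));
+		/// CheckIndex.Status status = checker.CheckIndex_Renamed_Method(null); // null = check all segments
+		/// if (!status.clean &amp;&amp; !status.partial)
+		///     checker.FixIndex(status); // writes a new segments_N, dropping broken segments
+		/// </pre>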
+               /// </summary>
+               public virtual void  FixIndex(Status result)
+               {
+                       if (result.partial)
+                               throw new System.ArgumentException("can only fix an index that was fully checked (this status checked a subset of segments)");
+                       result.newSegments.Commit(result.dir);
+               }
+               
+               private static bool assertsOn;
+               
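+		// Debug.Assert evaluates its condition only when assertions are enabled
+		// (debug builds), so routing TestAsserts() through the assert below sets
+		// assertsOn as a side effect exactly when asserts are on.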
+               private static bool TestAsserts()
+               {
+                       assertsOn = true;
+                       return true;
+               }
+               
+               private static bool AssertsOn()
+               {
+                       System.Diagnostics.Debug.Assert(TestAsserts());
+                       return assertsOn;
+               }
+               
+               /// <summary>Command-line interface to check and fix an index.
+               /// <p/>
+               /// Run it like this:
+               /// <pre>
+               /// java -ea:Mono.Lucene.Net... Mono.Lucene.Net.Index.CheckIndex pathToIndex [-fix] [-segment X] [-segment Y]
+               /// </pre>
+               /// <ul>
+               /// <li><code>-fix</code>: actually write a new segments_N file, removing any problematic segments</li>
+               /// <li><code>-segment X</code>: only check the specified
+               /// segment(s).  This can be specified multiple times,
+		/// to check more than one segment, e.g. <code>-segment _2
+               /// -segment _a</code>.  You can't use this with the -fix
+               /// option.</li>
+               /// </ul>
+               /// <p/><b>WARNING</b>: <code>-fix</code> should only be used on an emergency basis as it will cause
+               /// documents (perhaps many) to be permanently removed from the index.  Always make
+               /// a backup copy of your index before running this!  Do not run this tool on an index
+               /// that is actively being written to.  You have been warned!
+               /// <p/>                Run without -fix, this tool will open the index, report version information
+               /// and report any exceptions it hits and what action it would take if -fix were
+               /// specified.  With -fix, this tool will remove any segments that have issues and
+               /// write a new segments_N file.  This means all documents contained in the affected
+               /// segments will be removed.
+               /// <p/>
+               /// This tool exits with exit code 1 if the index cannot be opened or has any
+               /// corruption, else 0.
+               /// </summary>
+               [STAThread]
+               public static void  Main(System.String[] args)
+               {
+                       
+                       bool doFix = false;
+                       System.Collections.IList onlySegments = new System.Collections.ArrayList();
+                       System.String indexPath = null;
+                       int i = 0;
+                       while (i < args.Length)
+                       {
+                               if (args[i].Equals("-fix"))
+                               {
+                                       doFix = true;
+                                       i++;
+                               }
+                               else if (args[i].Equals("-segment"))
+                               {
+                                       if (i == args.Length - 1)
+                                       {
+                                               System.Console.Out.WriteLine("ERROR: missing name for -segment option");
+                                               System.Environment.Exit(1);
+                                       }
+                                       onlySegments.Add(args[i + 1]);
+                                       i += 2;
+                               }
+                               else
+                               {
+                                       if (indexPath != null)
+                                       {
+                                               System.Console.Out.WriteLine("ERROR: unexpected extra argument '" + args[i] + "'");
+                                               System.Environment.Exit(1);
+                                       }
+                                       indexPath = args[i];
+                                       i++;
+                               }
+                       }
+                       
+                       if (indexPath == null)
+                       {
+                               System.Console.Out.WriteLine("\nERROR: index path not specified");
+                               System.Console.Out.WriteLine("\nUsage: java Mono.Lucene.Net.Index.CheckIndex pathToIndex [-fix] [-segment X] [-segment Y]\n" + "\n" + "  -fix: actually write a new segments_N file, removing any problematic segments\n" + "  -segment X: only check the specified segments.  This can be specified multiple\n" + "              times, to check more than one segment, eg '-segment _2 -segment _a'.\n" + "              You can't use this with the -fix option\n" + "\n" + "**WARNING**: -fix should only be used on an emergency basis as it will cause\n" + "documents (perhaps many) to be permanently removed from the index.  Always make\n" + "a backup copy of your index before running this!  Do not run this tool on an index\n" + "that is actively being written to.  You have been warned!\n" + "\n" + "Run without -fix, this tool will open the index, report version information\n" + "and report any exceptions it hits and what action it would take if -fix were\n" + "specified.  With -fix, this tool will remove any segments that have issues and\n" + "write a new segments_N file.  This means all documents contained in the affected\n" + "segments will be removed.\n" + "\n" + "This tool exits with exit code 1 if the index cannot be opened or has any\n" + "corruption, else 0.\n");
+                               System.Environment.Exit(1);
+                       }
+                       
+                       if (!AssertsOn())
+                               System.Console.Out.WriteLine("\nNOTE: testing will be more thorough if you run java with '-ea:Mono.Lucene.Net...', so assertions are enabled");
+                       
+                       if (onlySegments.Count == 0)
+                               onlySegments = null;
+                       else if (doFix)
+                       {
+                               System.Console.Out.WriteLine("ERROR: cannot specify both -fix and -segment");
+                               System.Environment.Exit(1);
+                       }
+                       
+                       System.Console.Out.WriteLine("\nOpening index @ " + indexPath + "\n");
+                       Directory dir = null;
+                       try
+                       {
+                               dir = FSDirectory.Open(new System.IO.FileInfo(indexPath));
+                       }
+                       catch (System.Exception t)
+                       {
+                               System.Console.Out.WriteLine("ERROR: could not open directory \"" + indexPath + "\"; exiting");
+                               System.Console.Out.WriteLine(t.StackTrace);
+                               System.Environment.Exit(1);
+                       }
+                       
+                       CheckIndex checker = new CheckIndex(dir);
+                       System.IO.StreamWriter temp_writer;
+                       temp_writer = new System.IO.StreamWriter(System.Console.OpenStandardOutput(), System.Console.Out.Encoding);
+                       temp_writer.AutoFlush = true;
+                       checker.SetInfoStream(temp_writer);
+                       
+                       Status result = checker.CheckIndex_Renamed_Method(onlySegments);
+                       if (result.missingSegments)
+                       {
+                               System.Environment.Exit(1);
+                       }
+                       
+                       if (!result.clean)
+                       {
+                               if (!doFix)
+                               {
+                                       System.Console.Out.WriteLine("WARNING: would write new segments file, and " + result.totLoseDocCount + " documents would be lost, if -fix were specified\n");
+                               }
+                               else
+                               {
+                                       System.Console.Out.WriteLine("WARNING: " + result.totLoseDocCount + " documents will be lost\n");
+                                       System.Console.Out.WriteLine("NOTE: will write new segments file in 5 seconds; this will remove " + result.totLoseDocCount + " docs from the index. THIS IS YOUR LAST CHANCE TO CTRL+C!");
+                                       for (int s = 0; s < 5; s++)
+                                       {
+						System.Threading.Thread.Sleep(System.TimeSpan.FromSeconds(1)); // 10,000,000 ticks == 1 second
+                                               System.Console.Out.WriteLine("  " + (5 - s) + "...");
+                                       }
+                                       System.Console.Out.WriteLine("Writing...");
+                                       checker.FixIndex(result);
+                                       System.Console.Out.WriteLine("OK");
+                                       System.Console.Out.WriteLine("Wrote new segments file \"" + result.newSegments.GetCurrentSegmentFileName() + "\"");
+                               }
+                       }
+                       System.Console.Out.WriteLine("");
+                       
+                       int exitCode;
+			if (result.clean)
+                               exitCode = 0;
+                       else
+                               exitCode = 1;
+                       System.Environment.Exit(exitCode);
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/CompoundFileReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/CompoundFileReader.cs
new file mode 100644 (file)
index 0000000..c8ed868
--- /dev/null
@@ -0,0 +1,313 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using BufferedIndexInput = Mono.Lucene.Net.Store.BufferedIndexInput;
+using Directory = Mono.Lucene.Net.Store.Directory;
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+using Lock = Mono.Lucene.Net.Store.Lock;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       
+       /// <summary> Class for accessing a compound stream.
+       /// This class implements a directory, but is limited to only read operations.
+       /// Directory methods that would normally modify data throw an exception.
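+	/// 
+	/// <p/>A minimal read sketch (given an existing Directory <code>dir</code>;
+	/// the file names are hypothetical):
+	/// <pre>
+	/// CompoundFileReader cfr = new CompoundFileReader(dir, "_1.cfs");
+	/// IndexInput input = cfr.OpenInput("_1.fnm"); // look up a sub-file by id
+	/// // ... read via the IndexInput API ...
+	/// cfr.Close();
+	/// </pre>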
+       /// 
+       /// 
+       /// </summary>
+       /// <version>  $Id: CompoundFileReader.java 673371 2008-07-02 11:57:27Z mikemccand $
+       /// </version>
+       public class CompoundFileReader:Directory
+       {
+               
+               private int readBufferSize;
+               
+               private sealed class FileEntry
+               {
+                       internal long offset;
+                       internal long length;
+               }
+               
+               
+               // Base info
+               private Directory directory;
+               private System.String fileName;
+               
+               private IndexInput stream;
+               private System.Collections.Hashtable entries = new System.Collections.Hashtable();
+               
+               
+               public CompoundFileReader(Directory dir, System.String name):this(dir, name, BufferedIndexInput.BUFFER_SIZE)
+               {
+               }
+               
+               public CompoundFileReader(Directory dir, System.String name, int readBufferSize)
+               {
+                       directory = dir;
+                       fileName = name;
+                       this.readBufferSize = readBufferSize;
+                       
+                       bool success = false;
+                       
+                       try
+                       {
+                               stream = dir.OpenInput(name, readBufferSize);
+                               
+                               // read the directory and init files
+                               int count = stream.ReadVInt();
+                               FileEntry entry = null;
+                               for (int i = 0; i < count; i++)
+                               {
+                                       long offset = stream.ReadLong();
+                                       System.String id = stream.ReadString();
+                                       
+                                       if (entry != null)
+                                       {
+                                               // set length of the previous entry
+                                               entry.length = offset - entry.offset;
+                                       }
+                                       
+                                       entry = new FileEntry();
+                                       entry.offset = offset;
+                                       entries[id] = entry;
+                               }
+                               
+                               // set the length of the final entry
+                               if (entry != null)
+                               {
+                                       entry.length = stream.Length() - entry.offset;
+                               }
+                               
+                               success = true;
+                       }
+                       finally
+                       {
+                               if (!success && (stream != null))
+                               {
+                                       try
+                                       {
+                                               stream.Close();
+                                       }
+					catch (System.IO.IOException)
+					{
+						// Best-effort close; the original failure is what matters here.
+					}
+                               }
+                       }
+               }
+               
+               public virtual Directory GetDirectory()
+               {
+                       return directory;
+               }
+               
+               public virtual System.String GetName()
+               {
+                       return fileName;
+               }
+               
+               public override void  Close()
+               {
+                       lock (this)
+                       {
+                               if (stream == null)
+                                       throw new System.IO.IOException("Already closed");
+                               
+                               entries.Clear();
+                               stream.Close();
+                               stream = null;
+                       }
+               }
+
+        /// <summary>
+        /// .NET-specific addition: implements IDisposable by delegating to Close().
+        /// </summary>
+        public override void Dispose()
+        {
+            Close();
+        }
+               
+               public override IndexInput OpenInput(System.String id)
+               {
+                       lock (this)
+                       {
+                               // Default to readBufferSize passed in when we were opened
+                               return OpenInput(id, readBufferSize);
+                       }
+               }
+               
+               public override IndexInput OpenInput(System.String id, int readBufferSize)
+               {
+                       lock (this)
+                       {
+                               if (stream == null)
+                                       throw new System.IO.IOException("Stream closed");
+                               
+                               FileEntry entry = (FileEntry) entries[id];
+                               if (entry == null)
+                                       throw new System.IO.IOException("No sub-file with id " + id + " found");
+                               
+                               return new CSIndexInput(stream, entry.offset, entry.length, readBufferSize);
+                       }
+               }
+               
+               /// <summary>Returns an array of strings, one for each file in the directory. </summary>
+        [Obsolete("Mono.Lucene.Net-2.9.1. This method overrides obsolete member Mono.Lucene.Net.Store.Directory.List()")]
+               public override System.String[] List()
+               {
+                       System.String[] res = new System.String[entries.Count];
+                       entries.Keys.CopyTo(res, 0);
+                       return res;
+               }
+               
+               /// <summary>Returns true iff a file with the given name exists. </summary>
+               public override bool FileExists(System.String name)
+               {
+                       return entries.ContainsKey(name);
+               }
+               
+               /// <summary>Returns the time the compound file was last modified. </summary>
+               public override long FileModified(System.String name)
+               {
+                       return directory.FileModified(fileName);
+               }
+               
+               /// <summary>Set the modified time of the compound file to now. </summary>
+               public override void  TouchFile(System.String name)
+               {
+                       directory.TouchFile(fileName);
+               }
+               
+               /// <summary>Not implemented</summary>
+		/// <throws>  NotSupportedException  </throws>
+               public override void  DeleteFile(System.String name)
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+               /// <summary>Not implemented</summary>
+		/// <throws>  NotSupportedException  </throws>
+        [Obsolete("Mono.Lucene.Net-2.9.1. This method overrides obsolete member Mono.Lucene.Net.Store.Directory.RenameFile(string, string)")]
+               public override void  RenameFile(System.String from, System.String to)
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+               /// <summary>Returns the length of a file in the directory.</summary>
+               /// <throws>  IOException if the file does not exist  </throws>
+               public override long FileLength(System.String name)
+               {
+                       FileEntry e = (FileEntry) entries[name];
+                       if (e == null)
+                               throw new System.IO.IOException("File " + name + " does not exist");
+                       return e.length;
+               }
+               
+               /// <summary>Not implemented</summary>
+		/// <throws>  NotSupportedException  </throws>
+               public override IndexOutput CreateOutput(System.String name)
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+               /// <summary>Not implemented</summary>
+		/// <throws>  NotSupportedException  </throws>
+               public override Lock MakeLock(System.String name)
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+               /// <summary>Implementation of an IndexInput that reads from a portion of the
+               /// compound file. The visibility is left as "package" *only* because
+		/// this helps with testing since NUnit test cases in a different class
+               /// can then access package fields of this class.
+               /// </summary>
+               public /*internal*/ sealed class CSIndexInput:BufferedIndexInput, System.ICloneable
+               {
+                       
+                       internal IndexInput base_Renamed;
+                       internal long fileOffset;
+                       internal long length;
+                       
+                       internal CSIndexInput(IndexInput base_Renamed, long fileOffset, long length):this(base_Renamed, fileOffset, length, BufferedIndexInput.BUFFER_SIZE)
+                       {
+                       }
+                       
+                       internal CSIndexInput(IndexInput base_Renamed, long fileOffset, long length, int readBufferSize):base(readBufferSize)
+                       {
+                               this.base_Renamed = (IndexInput) base_Renamed.Clone();
+                               this.fileOffset = fileOffset;
+                               this.length = length;
+                       }
+                       
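+			// Clone the underlying IndexInput as well, so each CSIndexInput
+			// clone keeps an independent file position.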
+                       public override System.Object Clone()
+                       {
+                               CSIndexInput clone = (CSIndexInput) base.Clone();
+                               clone.base_Renamed = (IndexInput) base_Renamed.Clone();
+                               clone.fileOffset = fileOffset;
+                               clone.length = length;
+                               return clone;
+                       }
+                       
+                       /// <summary>Expert: implements buffer refill.  Reads bytes from the current
+                       /// position in the input.
+                       /// </summary>
+                       /// <param name="b">the array to read bytes into
+                       /// </param>
+                       /// <param name="offset">the offset in the array to start storing bytes
+                       /// </param>
+                       /// <param name="len">the number of bytes to read
+                       /// </param>
+                       public override void  ReadInternal(byte[] b, int offset, int len)
+                       {
+                               long start = GetFilePointer();
+                               if (start + len > length)
+                                       throw new System.IO.IOException("read past EOF");
+                               base_Renamed.Seek(fileOffset + start);
+                               base_Renamed.ReadBytes(b, offset, len, false);
+                       }
+                       
+                       /// <summary>Expert: implements seek.  Sets current position in this file, where
+                       /// the next {@link #ReadInternal(byte[],int,int)} will occur.
+                       /// </summary>
+                       /// <seealso cref="ReadInternal(byte[],int,int)">
+                       /// </seealso>
+                       public override void  SeekInternal(long pos)
+                       {
+                       }
+                       
+                       /// <summary>Closes the stream to further operations. </summary>
+                       public override void  Close()
+                       {
+                               base_Renamed.Close();
+                       }
+                       
+                       public override long Length()
+                       {
+                               return length;
+                       }
+
+            public IndexInput base_Renamed_ForNUnit
+            {
+                get { return base_Renamed; }
+            }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/CompoundFileWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/CompoundFileWriter.cs
new file mode 100644 (file)
index 0000000..9b9de50
--- /dev/null
@@ -0,0 +1,276 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Directory = Mono.Lucene.Net.Store.Directory;
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       
+       /// <summary> Combines multiple files into a single compound file.
+       /// The file format:<br/>
+       /// <ul>
+       /// <li>VInt fileCount</li>
+       /// <li>{Directory}
+       /// fileCount entries with the following structure:</li>
+       /// <ul>
+       /// <li>long dataOffset</li>
+       /// <li>String fileName</li>
+       /// </ul>
+       /// <li>{File Data}
+       /// fileCount entries with the raw data of the corresponding file</li>
+       /// </ul>
+       /// 
+       /// The fileCount integer indicates how many files are contained in this compound
+       /// file. The {directory} that follows has that many entries. Each directory entry
+       /// contains a long pointer to the start of this file's data section, and a String
+       /// with that file's name.
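+	/// 
+	/// <p/>For example, a compound file holding two sub-files might lay out as
+	/// follows (hypothetical names and offsets):
+	/// <pre>
+	/// VInt  2                  -- fileCount
+	/// long  34   "_1.fnm"      -- directory entry: dataOffset, fileName
+	/// long  120  "_1.fdt"
+	/// [raw _1.fnm bytes]       -- data sections, in directory order
+	/// [raw _1.fdt bytes]
+	/// </pre>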
+       /// 
+       /// 
+       /// </summary>
+       /// <version>  $Id: CompoundFileWriter.java 690539 2008-08-30 17:33:06Z mikemccand $
+       /// </version>
+       public sealed class CompoundFileWriter
+       {
+               
+               private sealed class FileEntry
+               {
+                       /// <summary>source file </summary>
+                       internal System.String file;
+                       
+                       /// <summary>temporary holder for the start of directory entry for this file </summary>
+                       internal long directoryOffset;
+                       
+                       /// <summary>temporary holder for the start of this file's data section </summary>
+                       internal long dataOffset;
+               }
+               
+               
+               private Directory directory;
+               private System.String fileName;
+        private System.Collections.Hashtable ids;
+               private System.Collections.ArrayList entries;
+               private bool merged = false;
+               private SegmentMerger.CheckAbort checkAbort;
+               
+               /// <summary>Create the compound stream in the specified file. The file name is the
+               /// entire name (no extensions are added).
+               /// </summary>
+		/// <throws>  NullReferenceException if <code>dir</code> or <code>name</code> is null </throws>
+               public CompoundFileWriter(Directory dir, System.String name):this(dir, name, null)
+               {
+               }
+               
+               internal CompoundFileWriter(Directory dir, System.String name, SegmentMerger.CheckAbort checkAbort)
+               {
+                       if (dir == null)
+                               throw new System.NullReferenceException("directory cannot be null");
+                       if (name == null)
+                               throw new System.NullReferenceException("name cannot be null");
+                       this.checkAbort = checkAbort;
+                       directory = dir;
+                       fileName = name;
+            ids = new System.Collections.Hashtable();
+                       entries = new System.Collections.ArrayList();
+               }
+               
+               /// <summary>Returns the directory of the compound file. </summary>
+               public Directory GetDirectory()
+               {
+                       return directory;
+               }
+               
+               /// <summary>Returns the name of the compound file. </summary>
+               public System.String GetName()
+               {
+                       return fileName;
+               }
+               
+               /// <summary>Add a source stream. <code>file</code> is the string by which the 
+               /// sub-stream will be known in the compound stream.
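+		/// 
+		/// <p/>A typical write sequence (given an existing Directory <code>dir</code>;
+		/// the file names are hypothetical):
+		/// <pre>
+		/// CompoundFileWriter cfw = new CompoundFileWriter(dir, "_1.cfs");
+		/// cfw.AddFile("_1.fnm");
+		/// cfw.AddFile("_1.fdt");
+		/// cfw.Close(); // merges the sources into _1.cfs
+		/// </pre>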
+               /// 
+               /// </summary>
+		/// <throws>  SystemException if this writer is closed </throws>
+		/// <throws>  NullReferenceException if <code>file</code> is null </throws>
+		/// <throws>  ArgumentException if a file with the same name has been added already </throws>
+               public void  AddFile(System.String file)
+               {
+                       if (merged)
+                               throw new System.SystemException("Can't add extensions after merge has been called");
+                       
+                       if (file == null)
+                               throw new System.NullReferenceException("file cannot be null");
+                       
+            try
+            {
+                // Hashtable.Add throws ArgumentException on a duplicate key,
+                // which doubles as the duplicate-file check here.
+                ids.Add(file, file);
+            }
+            catch (System.ArgumentException)
+            {
+                throw new System.ArgumentException("File " + file + " already added");
+            }
+                       
+                       FileEntry entry = new FileEntry();
+                       entry.file = file;
+                       entries.Add(entry);
+               }
+               
+               /// <summary>Merge files with the extensions added up to now.
+               /// All files with these extensions are combined sequentially into the
+               /// compound stream. After successful merge, the source files
+               /// are deleted.
+               /// </summary>
+		/// <throws>  SystemException if Close() has already been called, or if no file has been added to this object </throws>
+               public void  Close()
+               {
+                       if (merged)
+                               throw new System.SystemException("Merge already performed");
+                       
+			if (entries.Count == 0)
+                               throw new System.SystemException("No entries to merge have been defined");
+                       
+                       merged = true;
+                       
+                       // open the compound stream
+                       IndexOutput os = null;
+                       try
+                       {
+                               os = directory.CreateOutput(fileName);
+                               
+                               // Write the number of entries
+                               os.WriteVInt(entries.Count);
+                               
+                               // Write the directory with all offsets at 0.
+                               // Remember the positions of directory entries so that we can
+                               // adjust the offsets later
+                               System.Collections.IEnumerator it = entries.GetEnumerator();
+                               long totalSize = 0;
+                               while (it.MoveNext())
+                               {
+                                       FileEntry fe = (FileEntry) it.Current;
+                                       fe.directoryOffset = os.GetFilePointer();
+                                       os.WriteLong(0); // for now
+                                       os.WriteString(fe.file);
+                                       totalSize += directory.FileLength(fe.file);
+                               }
+                               
+                               // Pre-allocate size of file as optimization --
+                               // this can potentially help IO performance as
+                               // we write the file and also later during
+                               // searching.  It also uncovers a disk-full
+                               // situation earlier and hopefully without
+                               // actually filling disk to 100%:
+                               long finalLength = totalSize + os.GetFilePointer();
+                               os.SetLength(finalLength);
+                               
+                               // Open the files and copy their data into the stream.
+                               // Remember the locations of each file's data section.
+                               byte[] buffer = new byte[16384];
+                               it = entries.GetEnumerator();
+                               while (it.MoveNext())
+                               {
+                                       FileEntry fe = (FileEntry) it.Current;
+                                       fe.dataOffset = os.GetFilePointer();
+                                       CopyFile(fe, os, buffer);
+                               }
+                               
+                               // Write the data offsets into the directory of the compound stream
+                               it = entries.GetEnumerator();
+                               while (it.MoveNext())
+                               {
+                                       FileEntry fe = (FileEntry) it.Current;
+                                       os.Seek(fe.directoryOffset);
+                                       os.WriteLong(fe.dataOffset);
+                               }
+                               
+                               System.Diagnostics.Debug.Assert(finalLength == os.Length());
+                               
+                               // Close the output stream. Set the os to null before trying to
+                               // close so that if an exception occurs during the close, the
+                               // finally clause below will not attempt to close the stream
+                               // the second time.
+                               IndexOutput tmp = os;
+                               os = null;
+                               tmp.Close();
+                       }
+                       finally
+                       {
+                               if (os != null)
+                                       try
+                                       {
+                                               os.Close();
+                                       }
+					catch (System.IO.IOException)
+					{
+						// Suppress secondary close failures; the original exception is already propagating.
+					}
+                       }
+               }
+               
+               /// <summary>Copy the contents of the file with specified extension into the
+               /// provided output stream. Use the provided buffer for moving data
+               /// to reduce memory allocation.
+               /// </summary>
+               private void  CopyFile(FileEntry source, IndexOutput os, byte[] buffer)
+               {
+                       IndexInput is_Renamed = null;
+                       try
+                       {
+                               long startPtr = os.GetFilePointer();
+                               
+                               is_Renamed = directory.OpenInput(source.file);
+                               long length = is_Renamed.Length();
+                               long remainder = length;
+                               int chunk = buffer.Length;
+                               
+                               while (remainder > 0)
+                               {
+                                       int len = (int) System.Math.Min(chunk, remainder);
+                                       is_Renamed.ReadBytes(buffer, 0, len, false);
+                                       os.WriteBytes(buffer, len);
+                                       remainder -= len;
+					// Roughly every 2 MB of copied data, check whether
+					// the merge has been aborted.
+					if (checkAbort != null)
+						checkAbort.Work(80);
+                               }
+                               
+                               // Verify that remainder is 0
+                               if (remainder != 0)
+                                       throw new System.IO.IOException("Non-zero remainder length after copying: " + remainder + " (id: " + source.file + ", length: " + length + ", buffer size: " + chunk + ")");
+                               
+                               // Verify that the output length diff is equal to original file
+                               long endPtr = os.GetFilePointer();
+                               long diff = endPtr - startPtr;
+                               if (diff != length)
+                                       throw new System.IO.IOException("Difference in the output file offsets " + diff + " does not match the original file length " + length);
+                       }
+                       finally
+                       {
+                               if (is_Renamed != null)
+                                       is_Renamed.Close();
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ConcurrentMergeScheduler.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ConcurrentMergeScheduler.cs
new file mode 100644 (file)
index 0000000..62ef844
--- /dev/null
@@ -0,0 +1,543 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Directory = Mono.Lucene.Net.Store.Directory;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>A {@link MergeScheduler} that runs each merge using a
+       /// separate thread, up to a maximum number of threads
+       /// ({@link #setMaxThreadCount}). Once that limit is reached and
+       /// another merge is needed, the thread(s) that are updating the
+       /// index will pause until one or more merges completes.  This is a
+       /// simple way to use concurrency in the indexing process
+       /// without having to create and manage application level
+       /// threads.
+       /// </summary>
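+       /// <example>A minimal usage sketch (illustrative only; it assumes the
+       /// usual FSDirectory/IndexWriter entry points of this port):
+       /// <code>
+       /// Directory dir = FSDirectory.Open(new System.IO.DirectoryInfo("index"));
+       /// IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+       /// ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+       /// cms.SetMaxThreadCount(2);      // allow at most two concurrent merge threads
+       /// writer.SetMergeScheduler(cms); // merges now run on background threads
+       /// </code>
+       /// </example>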
+       
+       public class ConcurrentMergeScheduler:MergeScheduler
+       {
+               
+               private int mergeThreadPriority = -1;
+               
+               protected internal System.Collections.IList mergeThreads = new System.Collections.ArrayList();
+               
+               // Max number of threads allowed to be merging at once
+               private int maxThreadCount = 1;
+               
+               protected internal Directory dir;
+               
+               private bool closed;
+               protected internal IndexWriter writer;
+               protected internal int mergeThreadCount;
+               
+               public ConcurrentMergeScheduler()
+               {
+                       if (allInstances != null)
+                       {
+                               // Only for testing
+                               AddMyself();
+                       }
+               }
+               
+               /// <summary>Sets the max # simultaneous threads that may be
+               /// running.  If a merge is necessary yet we already have
+               /// this many threads running, the incoming thread (that
+               /// is calling add/updateDocument) will block until
+               /// a merge thread has completed. 
+               /// </summary>
+               public virtual void  SetMaxThreadCount(int count)
+               {
+                       if (count < 1)
+                               throw new System.ArgumentException("count should be at least 1");
+                       maxThreadCount = count;
+               }
+               
+               /// <summary>Get the max # simultaneous threads that may be running.</summary>
+               /// <seealso cref="setMaxThreadCount">
+               /// </seealso>
+               public virtual int GetMaxThreadCount()
+               {
+                       return maxThreadCount;
+               }
+               
+               /// <summary>Return the priority that merge threads run at.  By
+               /// default the priority is 1 plus (i.e., slightly higher than)
+               /// the priority of the first thread that calls merge.
+               /// </summary>
+               public virtual int GetMergeThreadPriority()
+               {
+                       lock (this)
+                       {
+                               InitMergeThreadPriority();
+                               return mergeThreadPriority;
+                       }
+               }
+               
+               /// <summary>Set the priority that merge threads run at. </summary>
+               public virtual void  SetMergeThreadPriority(int pri)
+               {
+                       lock (this)
+                       {
+                               if (pri > (int) System.Threading.ThreadPriority.Highest || pri < (int) System.Threading.ThreadPriority.Lowest)
+                                       throw new System.ArgumentException("priority must be in range " + (int) System.Threading.ThreadPriority.Lowest + " .. " + (int) System.Threading.ThreadPriority.Highest + " inclusive");
+                               mergeThreadPriority = pri;
+                               
+                               int numThreads = MergeThreadCount();
+                               for (int i = 0; i < numThreads; i++)
+                               {
+                                       MergeThread merge = (MergeThread) mergeThreads[i];
+                                       merge.SetThreadPriority(pri);
+                               }
+                       }
+               }
+               
+               private bool Verbose()
+               {
+                       return writer != null && writer.Verbose();
+               }
+               
+               private void  Message(System.String message)
+               {
+                       if (Verbose())
+                               writer.Message("CMS: " + message);
+               }
+               
+               private void  InitMergeThreadPriority()
+               {
+                       lock (this)
+                       {
+                               if (mergeThreadPriority == -1)
+                               {
+                                       // Default to slightly higher priority than our
+                                       // calling thread
+                                       mergeThreadPriority = 1 + (System.Int32) SupportClass.ThreadClass.Current().Priority;
+                                       if (mergeThreadPriority > (int) System.Threading.ThreadPriority.Highest)
+                                               mergeThreadPriority = (int) System.Threading.ThreadPriority.Highest;
+                               }
+                       }
+               }
+               
+               public override void  Close()
+               {
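+                       // Note: this only marks the scheduler as closed; it does not
+                       // wait for in-flight merge threads (use Sync() for that).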
+                       closed = true;
+               }
+               
+               public virtual void  Sync()
+               {
+                       lock (this)
+                       {
+                               while (MergeThreadCount() > 0)
+                               {
+                                       if (Verbose())
+                                               Message("now wait for threads; currently " + mergeThreads.Count + " still running");
+                                       int count = mergeThreads.Count;
+                                       if (Verbose())
+                                       {
+                                               for (int i = 0; i < count; i++)
+                                                       Message("    " + i + ": " + ((MergeThread) mergeThreads[i]));
+                                       }
+                                       
+                                       try
+                                       {
+                                               System.Threading.Monitor.Wait(this);
+                                       }
+                                       catch (System.Threading.ThreadInterruptedException ie)
+                                       {
+                                               // In 3.0 we will change this to throw
+                                               // InterruptedException instead
+                                               SupportClass.ThreadClass.Current().Interrupt();
+                                               throw new System.SystemException(ie.Message, ie);
+                                       }
+                               }
+                       }
+               }
+               
+               private int MergeThreadCount()
+               {
+                       lock (this)
+                       {
+                               return MergeThreadCount(false);
+                       }
+               }
+               
+               private int MergeThreadCount(bool excludeDone)
+               {
+                       lock (this)
+                       {
+                               int count = 0;
+                               int numThreads = mergeThreads.Count;
+                               for (int i = 0; i < numThreads; i++)
+                               {
+                                       MergeThread t = (MergeThread) mergeThreads[i];
+                                       if (t.IsAlive)
+                                       {
+                                               MergePolicy.OneMerge runningMerge = t.GetRunningMerge();
+                                               if (!excludeDone || (runningMerge != null && !runningMerge.mergeDone))
+                                               {
+                                                       count++;
+                                               }
+                                       }
+                               }
+                               return count;
+                       }
+               }
+               
+               public override void  Merge(IndexWriter writer)
+               {
+                       
+                       // TODO: enable this once we are on JRE 1.5
+                       // assert !Thread.holdsLock(writer);
+                       
+                       this.writer = writer;
+                       
+                       InitMergeThreadPriority();
+                       
+                       dir = writer.GetDirectory();
+                       
+                       // First, quickly run through the newly proposed merges
+                       // and add any orthogonal merges (ie a merge not
+                       // involving segments already pending to be merged) to
+                       // the queue.  If we are way behind on merging, many of
+                       // these newly proposed merges will likely already be
+                       // registered.
+                       
+                       if (Verbose())
+                       {
+                               Message("now merge");
+                               Message("  index: " + writer.SegString());
+                       }
+                       
+                       // Iterate, pulling from the IndexWriter's queue of
+                       // pending merges, until it's empty:
+                       while (true)
+                       {
+                               
+                               // TODO: we could be careful about which merges to do in
+                               // the BG (eg maybe the "biggest" ones) vs FG, which
+                               // merges to do first (the easiest ones?), etc.
+                               
+                               MergePolicy.OneMerge merge = writer.GetNextMerge();
+                               if (merge == null)
+                               {
+                                       if (Verbose())
+                                               Message("  no more merges pending; now return");
+                                       return;
+                               }
+                               
+                               // We do this w/ the primary thread to keep
+                               // deterministic assignment of segment names
+                               writer.MergeInit(merge);
+                               
+                               bool success = false;
+                               try
+                               {
+                                       lock (this)
+                                       {
+                                               MergeThread merger;
+                                               while (MergeThreadCount(true) >= maxThreadCount)
+                                               {
+                                                       if (Verbose())
+                                                               Message("    too many merge threads running; stalling...");
+                                                       try
+                                                       {
+                                                               System.Threading.Monitor.Wait(this);
+                                                       }
+                                                       catch (System.Threading.ThreadInterruptedException ie)
+                                                       {
+                                                               // In 3.0 we will change this to throw
+                                                               // InterruptedException instead
+                                                               SupportClass.ThreadClass.Current().Interrupt();
+                                                               throw new System.SystemException(ie.Message, ie);
+                                                       }
+                                               }
+                                               
+                                               if (Verbose())
+                                                       Message("  consider merge " + merge.SegString(dir));
+                                               
+
+                                               // OK to spawn a new merge thread to handle this
+                                               // merge:
+                                               merger = GetMergeThread(writer, merge);
+                                               mergeThreads.Add(merger);
+                                               if (Verbose())
+                                                       Message("    launch new thread [" + merger.Name + "]");
+                                               
+                                               merger.Start();
+                                               success = true;
+                                       }
+                               }
+                               finally
+                               {
+                                       if (!success)
+                                       {
+                                               writer.MergeFinish(merge);
+                                       }
+                               }
+                       }
+               }
+               
+               /// <summary>Does the actual merge by calling {@link IndexWriter#merge}. </summary>
+               protected internal virtual void  DoMerge(MergePolicy.OneMerge merge)
+               {
+                       writer.Merge(merge);
+               }
+               
+               /// <summary>Create and return a new MergeThread </summary>
+               protected internal virtual MergeThread GetMergeThread(IndexWriter writer, MergePolicy.OneMerge merge)
+               {
+                       lock (this)
+                       {
+                               MergeThread thread = new MergeThread(this, writer, merge);
+                               thread.SetThreadPriority(mergeThreadPriority);
+                               thread.IsBackground = true;
+                               thread.Name = "Lucene Merge Thread #" + mergeThreadCount++;
+                               return thread;
+                       }
+               }
+               
+               public /*protected internal*/ class MergeThread:SupportClass.ThreadClass
+               {
+                       private void  InitBlock(ConcurrentMergeScheduler enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private ConcurrentMergeScheduler enclosingInstance;
+                       public ConcurrentMergeScheduler Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       
+                       internal IndexWriter writer;
+                       internal MergePolicy.OneMerge startMerge;
+                       internal MergePolicy.OneMerge runningMerge;
+                       
+                       public MergeThread(ConcurrentMergeScheduler enclosingInstance, IndexWriter writer, MergePolicy.OneMerge startMerge)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.writer = writer;
+                               this.startMerge = startMerge;
+                       }
+                       
+                       public virtual void  SetRunningMerge(MergePolicy.OneMerge merge)
+                       {
+                               lock (this)
+                               {
+                                       runningMerge = merge;
+                               }
+                       }
+                       
+                       public virtual MergePolicy.OneMerge GetRunningMerge()
+                       {
+                               lock (this)
+                               {
+                                       return runningMerge;
+                               }
+                       }
+                       
+                       public virtual void  SetThreadPriority(int pri)
+                       {
+                               try
+                               {
+                                       Priority = (System.Threading.ThreadPriority) pri;
+                               }
+                               catch (System.NullReferenceException)
+                               {
+                                       // The Java original sometimes threw an NPE out of here
+                                       // (Sun's JDK 1.5 on Linux); keep the same guard in the port.
+                               }
+                               catch (System.Security.SecurityException)
+                               {
+                                       // Ignore this because we will still run fine with
+                                       // normal thread priority
+                               }
+                       }
+                       
+                       override public void  Run()
+                       {
+                               
+                               // First time through the while loop we do the merge
+                               // that we were started with:
+                               MergePolicy.OneMerge merge = this.startMerge;
+                               
+                               try
+                               {
+                                       
+                                       if (Enclosing_Instance.Verbose())
+                                               Enclosing_Instance.Message("  merge thread: start");
+                                       
+                                       while (true)
+                                       {
+                                               SetRunningMerge(merge);
+                                               Enclosing_Instance.DoMerge(merge);
+                                               
+                                               // Subsequent times through the loop we do any new
+                                               // merge that writer says is necessary:
+                                               merge = writer.GetNextMerge();
+                                               if (merge != null)
+                                               {
+                                                       writer.MergeInit(merge);
+                                                       if (Enclosing_Instance.Verbose())
+                                                               Enclosing_Instance.Message("  merge thread: do another merge " + merge.SegString(Enclosing_Instance.dir));
+                                               }
+                                               else
+                                                       break;
+                                       }
+                                       
+                                       if (Enclosing_Instance.Verbose())
+                                               Enclosing_Instance.Message("  merge thread: done");
+                               }
+                               catch (System.Exception exc)
+                               {
+                                       
+                                       // Ignore the exception if it was due to abort:
+                                       if (!(exc is MergePolicy.MergeAbortedException))
+                                       {
+                                               if (!Enclosing_Instance.suppressExceptions)
+                                               {
+                                                       // suppressExceptions is normally only set during
+                                                       // testing.
+                                                       Mono.Lucene.Net.Index.ConcurrentMergeScheduler.anyExceptions = true;
+                                                       Enclosing_Instance.HandleMergeException(exc);
+                                               }
+                                       }
+                               }
+                               finally
+                               {
+                                       lock (Enclosing_Instance)
+                                       {
+                                               System.Threading.Monitor.PulseAll(Enclosing_Instance);
+                                               Enclosing_Instance.mergeThreads.Remove(this);
+                                               bool removed = !Enclosing_Instance.mergeThreads.Contains(this);
+                                               System.Diagnostics.Debug.Assert(removed);
+                                       }
+                               }
+                       }
+                       
+                       public override System.String ToString()
+                       {
+                               MergePolicy.OneMerge merge = GetRunningMerge();
+                               if (merge == null)
+                                       merge = startMerge;
+                               return "merge thread: " + merge.SegString(Enclosing_Instance.dir);
+                       }
+               }
+               
+               /// <summary>Called when an exception is hit in a background merge
+               /// thread 
+               /// </summary>
+               protected internal virtual void  HandleMergeException(System.Exception exc)
+               {
+                       try
+                       {
+                               // When an exception is hit during merge, IndexWriter
+                               // removes any partial files and then allows another
+                               // merge to run.  If whatever caused the error is not
+                               // transient then the exception will keep happening,
+                               // so, we sleep here to avoid saturating CPU in such
+                               // cases:
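+                               // (TimeSpan ticks are 100 ns, so 10000 * 250 ticks == 250 ms)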
+                               System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 250));
+                       }
+                       catch (System.Threading.ThreadInterruptedException ie)
+                       {
+                               SupportClass.ThreadClass.Current().Interrupt();
+                               // In 3.0 this will throw InterruptedException
+                               throw new System.SystemException(ie.Message, ie);
+                       }
+                       throw new MergePolicy.MergeException(exc, dir);
+               }
+               
+               internal static bool anyExceptions = false;
+               
+               /// <summary>Used for testing </summary>
+               public static bool AnyUnhandledExceptions()
+               {
+                       if (allInstances == null)
+                       {
+                               throw new System.SystemException("setTestMode() was not called; often this is because your test case's setUp method fails to call super.setUp in LuceneTestCase");
+                       }
+                       lock (allInstances.SyncRoot)
+                       {
+                               int count = allInstances.Count;
+                               // Make sure all outstanding threads are done so we see
+                               // any exceptions they may produce:
+                               for (int i = 0; i < count; i++)
+                                       ((ConcurrentMergeScheduler) allInstances[i]).Sync();
+                               bool v = anyExceptions;
+                               anyExceptions = false;
+                               return v;
+                       }
+               }
+               
+               public static void  ClearUnhandledExceptions()
+               {
+                       lock (allInstances.SyncRoot)
+                       {
+                               anyExceptions = false;
+                       }
+               }
+               
+               /// <summary>Used for testing </summary>
+               private void  AddMyself()
+               {
+                       lock (allInstances.SyncRoot)
+                       {
+                               int size = allInstances.Count;
+                               int upto = 0;
+                               for (int i = 0; i < size; i++)
+                               {
+                                       ConcurrentMergeScheduler other = (ConcurrentMergeScheduler) allInstances[i];
+                                       if (!(other.closed && 0 == other.MergeThreadCount()))
+                                       // Keep this one for now: it still has threads or
+                                       // may spawn new threads
+                                               allInstances[upto++] = other;
+                               }
+                               ((System.Collections.IList) ((System.Collections.ArrayList) allInstances).GetRange(upto, allInstances.Count - upto)).Clear();
+                               allInstances.Add(this);
+                       }
+               }
+               
+               private bool suppressExceptions;
+               
+               /// <summary>Used for testing </summary>
+               public /*internal*/ virtual void  SetSuppressExceptions()
+               {
+                       suppressExceptions = true;
+               }
+               
+               /// <summary>Used for testing </summary>
+               public /*internal*/ virtual void  ClearSuppressExceptions()
+               {
+                       suppressExceptions = false;
+               }
+               
+               /// <summary>Used for testing </summary>
+               private static System.Collections.IList allInstances;
+               public static void  SetTestMode()
+               {
+                       allInstances = new System.Collections.ArrayList();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/CorruptIndexException.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/CorruptIndexException.cs
new file mode 100644 (file)
index 0000000..33c9e77
--- /dev/null
@@ -0,0 +1,36 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> This exception is thrown when Lucene detects
+       /// an inconsistency in the index.
+       /// </summary>
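+       /// <example>An illustrative sketch of handling it, assuming the usual
+       /// IndexReader.Open entry point (adapt to the actual call site):
+       /// <code>
+       /// try {
+       ///     IndexReader reader = IndexReader.Open(dir, true);
+       /// } catch (CorruptIndexException) {
+       ///     // the index is unreadable; restore from backup or re-index
+       /// }
+       /// </code>
+       /// </example>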
+       [Serializable]
+       public class CorruptIndexException:System.IO.IOException
+       {
+               public CorruptIndexException(System.String message):base(message)
+               {
+               }
+               public CorruptIndexException(System.String message, Exception exp):base(message, exp)
+               {
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DefaultSkipListReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DefaultSkipListReader.cs
new file mode 100644 (file)
index 0000000..6a72b6f
--- /dev/null
@@ -0,0 +1,129 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> Implements the skip list reader for the default posting list format
+       /// that stores positions and payloads.
+       /// 
+       /// </summary>
+       class DefaultSkipListReader:MultiLevelSkipListReader
+       {
+               private bool currentFieldStoresPayloads;
+               private long[] freqPointer;
+               private long[] proxPointer;
+               private int[] payloadLength;
+               
+               private long lastFreqPointer;
+               private long lastProxPointer;
+               private int lastPayloadLength;
+               
+               
+               internal DefaultSkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval):base(skipStream, maxSkipLevels, skipInterval)
+               {
+                       freqPointer = new long[maxSkipLevels];
+                       proxPointer = new long[maxSkipLevels];
+                       payloadLength = new int[maxSkipLevels];
+               }
+               
+               internal virtual void  Init(long skipPointer, long freqBasePointer, long proxBasePointer, int df, bool storesPayloads)
+               {
+                       base.Init(skipPointer, df);
+                       this.currentFieldStoresPayloads = storesPayloads;
+                       lastFreqPointer = freqBasePointer;
+                       lastProxPointer = proxBasePointer;
+
+                       for (int i = 0; i < freqPointer.Length; i++) freqPointer[i] = freqBasePointer;
+                       for (int i = 0; i < proxPointer.Length; i++) proxPointer[i] = proxBasePointer;
+                       for (int i = 0; i < payloadLength.Length; i++) payloadLength[i] = 0;
+               }
+               
+               /// <summary>Returns the freq pointer of the doc to which the last call of 
+               /// {@link MultiLevelSkipListReader#SkipTo(int)} has skipped.  
+               /// </summary>
+               internal virtual long GetFreqPointer()
+               {
+                       return lastFreqPointer;
+               }
+               
+               /// <summary>Returns the prox pointer of the doc to which the last call of 
+               /// {@link MultiLevelSkipListReader#SkipTo(int)} has skipped.  
+               /// </summary>
+               internal virtual long GetProxPointer()
+               {
+                       return lastProxPointer;
+               }
+               
+               /// <summary>Returns the payload length of the payload stored just before 
+               /// the doc to which the last call of {@link MultiLevelSkipListReader#SkipTo(int)} 
+               /// has skipped.  
+               /// </summary>
+               internal virtual int GetPayloadLength()
+               {
+                       return lastPayloadLength;
+               }
+               
+               protected internal override void  SeekChild(int level)
+               {
+                       base.SeekChild(level);
+                       freqPointer[level] = lastFreqPointer;
+                       proxPointer[level] = lastProxPointer;
+                       payloadLength[level] = lastPayloadLength;
+               }
+               
+               protected internal override void  SetLastSkipData(int level)
+               {
+                       base.SetLastSkipData(level);
+                       lastFreqPointer = freqPointer[level];
+                       lastProxPointer = proxPointer[level];
+                       lastPayloadLength = payloadLength[level];
+               }
+               
+               
+               protected internal override int ReadSkipData(int level, IndexInput skipStream)
+               {
+                       int delta;
+                       if (currentFieldStoresPayloads)
+                       {
+                               // the current field stores payloads.
+                               // if the doc delta is odd then we have
+                               // to read the current payload length
+                               // because it differs from the length of the
+                               // previous payload
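+                               // Illustrative decode: an encoded VInt of 11 is odd, so a new
+                               // payload length follows; the doc delta is then 11 >> 1 == 5.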
+                               delta = skipStream.ReadVInt();
+                               if ((delta & 1) != 0)
+                               {
+                                       payloadLength[level] = skipStream.ReadVInt();
+                               }
+                               delta = SupportClass.Number.URShift(delta, 1);
+                       }
+                       else
+                       {
+                               delta = skipStream.ReadVInt();
+                       }
+                       freqPointer[level] += skipStream.ReadVInt();
+                       proxPointer[level] += skipStream.ReadVInt();
+                       
+                       return delta;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DefaultSkipListWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DefaultSkipListWriter.cs
new file mode 100644 (file)
index 0000000..5bffed6
--- /dev/null
@@ -0,0 +1,143 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       
+       /// <summary> Implements the skip list writer for the default posting list format
+       /// that stores positions and payloads.
+       /// 
+       /// </summary>
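+       /// <remarks>In the usual multi-level layout (see MultiLevelSkipListWriter),
+       /// level 0 records an entry every skipInterval documents and each higher
+       /// level is skipInterval times sparser; e.g. skipInterval = 16 gives
+       /// entries every 16, 256, and 4096 documents on levels 0, 1, and 2.
+       /// </remarks>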
+       class DefaultSkipListWriter:MultiLevelSkipListWriter
+       {
+               private int[] lastSkipDoc;
+               private int[] lastSkipPayloadLength;
+               private long[] lastSkipFreqPointer;
+               private long[] lastSkipProxPointer;
+               
+               private IndexOutput freqOutput;
+               private IndexOutput proxOutput;
+               
+               private int curDoc;
+               private bool curStorePayloads;
+               private int curPayloadLength;
+               private long curFreqPointer;
+               private long curProxPointer;
+               
+               internal DefaultSkipListWriter(int skipInterval, int numberOfSkipLevels, int docCount, IndexOutput freqOutput, IndexOutput proxOutput):base(skipInterval, numberOfSkipLevels, docCount)
+               {
+                       this.freqOutput = freqOutput;
+                       this.proxOutput = proxOutput;
+                       
+                       lastSkipDoc = new int[numberOfSkipLevels];
+                       lastSkipPayloadLength = new int[numberOfSkipLevels];
+                       lastSkipFreqPointer = new long[numberOfSkipLevels];
+                       lastSkipProxPointer = new long[numberOfSkipLevels];
+               }
+               
+               internal virtual void  SetFreqOutput(IndexOutput freqOutput)
+               {
+                       this.freqOutput = freqOutput;
+               }
+               
+               internal virtual void  SetProxOutput(IndexOutput proxOutput)
+               {
+                       this.proxOutput = proxOutput;
+               }
+               
+               /// <summary> Sets the values for the current skip data. </summary>
+               internal virtual void  SetSkipData(int doc, bool storePayloads, int payloadLength)
+               {
+                       this.curDoc = doc;
+                       this.curStorePayloads = storePayloads;
+                       this.curPayloadLength = payloadLength;
+                       this.curFreqPointer = freqOutput.GetFilePointer();
+                       if (proxOutput != null)
+                               this.curProxPointer = proxOutput.GetFilePointer();
+               }
+               
+               protected internal override void  ResetSkip()
+               {
+                       base.ResetSkip();
+                       for (int i = 0; i < lastSkipDoc.Length; i++) lastSkipDoc[i] = 0;
+                       for (int i = 0; i < lastSkipPayloadLength.Length; i++) lastSkipPayloadLength[i] = -1; // we don't have to write the first length in the skip list
+                       for (int i = 0; i < lastSkipFreqPointer.Length; i++) lastSkipFreqPointer[i] = freqOutput.GetFilePointer();
+                       if (proxOutput != null)
+                               for (int i = 0; i < lastSkipProxPointer.Length; i++) lastSkipProxPointer[i] = proxOutput.GetFilePointer();
+               }
+               
+               protected internal override void  WriteSkipData(int level, IndexOutput skipBuffer)
+               {
+                       // To efficiently store payloads in the posting lists we do not store the length of
+                       // every payload. Instead we omit the length for a payload if the previous payload had
+                       // the same length.
+                       // However, in order to support skipping the payload length at every skip point must be known.
+                       // So we use the same length encoding that we use for the posting lists for the skip data as well:
+                       // Case 1: current field does not store payloads
+                       //           SkipDatum                 --> DocSkip, FreqSkip, ProxSkip
+                       //           DocSkip,FreqSkip,ProxSkip --> VInt
+                       //           DocSkip records the document number before every SkipInterval-th document in TermFreqs.
+                       //           Document numbers are represented as differences from the previous value in the sequence.
+                       // Case 2: current field stores payloads
+                       //           SkipDatum                 --> DocSkip, PayloadLength?, FreqSkip,ProxSkip
+                       //           DocSkip,FreqSkip,ProxSkip --> VInt
+                       //           PayloadLength             --> VInt    
+                       //         In this case DocSkip/2 is the difference between
+                       //         the current and the previous value. If DocSkip
+                       //         is odd, then a PayloadLength encoded as VInt follows,
+                       //         if DocSkip is even, then it is assumed that the
+                       //         current payload length equals the length at the previous
+                       //         skip point
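+                       // Illustrative example: with curDoc == 45 and lastSkipDoc[level] == 40,
+                       // an unchanged payload length writes VInt(5 * 2) == 10; a payload length
+                       // that changed to 7 writes VInt(5 * 2 + 1) == 11 followed by VInt(7).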
+                       if (curStorePayloads)
+                       {
+                               int delta = curDoc - lastSkipDoc[level];
+                               if (curPayloadLength == lastSkipPayloadLength[level])
+                               {
+                                       // the current payload length equals the length at the previous skip point,
+                                       // so we don't store the length again
+                                       skipBuffer.WriteVInt(delta * 2);
+                               }
+                               else
+                               {
+                                       // the payload length is different from the previous one. We shift the DocSkip, 
+                                       // set the lowest bit and store the current payload length as VInt.
+                                       skipBuffer.WriteVInt(delta * 2 + 1);
+                                       skipBuffer.WriteVInt(curPayloadLength);
+                                       lastSkipPayloadLength[level] = curPayloadLength;
+                               }
+                       }
+                       else
+                       {
+                               // current field does not store payloads
+                               skipBuffer.WriteVInt(curDoc - lastSkipDoc[level]);
+                       }
+                       skipBuffer.WriteVInt((int) (curFreqPointer - lastSkipFreqPointer[level]));
+                       skipBuffer.WriteVInt((int) (curProxPointer - lastSkipProxPointer[level]));
+                       
+                       lastSkipDoc[level] = curDoc;
+                       //System.out.println("write doc at level " + level + ": " + curDoc);
+                       
+                       lastSkipFreqPointer[level] = curFreqPointer;
+                       lastSkipProxPointer[level] = curProxPointer;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DirectoryOwningReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DirectoryOwningReader.cs
new file mode 100644 (file)
index 0000000..9fd049c
--- /dev/null
@@ -0,0 +1,121 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> This class keeps track of closing the underlying directory. It is used to wrap
+       /// DirectoryReaders that are created using a String/File parameter
+       /// in IndexReader.open() with FSDirectory.getDirectory().
+       /// </summary>
+       /// <deprecated> This helper class is removed with all String/File
+       /// IndexReader.open() methods in Lucene 3.0
+       /// </deprecated>
+       [Obsolete("This helper class is removed with all String/File IndexReader.open() methods in Lucene 3.0")]
+       sealed class DirectoryOwningReader:FilterIndexReader, System.ICloneable
+       {
+               
+               internal DirectoryOwningReader(IndexReader in_Renamed):base(in_Renamed)
+               {
+                       this.ref_Renamed = new SegmentReader.Ref();
+                       System.Diagnostics.Debug.Assert(this.ref_Renamed.RefCount() == 1);
+               }
+               
+               private DirectoryOwningReader(IndexReader in_Renamed, SegmentReader.Ref ref_Renamed):base(in_Renamed)
+               {
+                       this.ref_Renamed = ref_Renamed;
+                       ref_Renamed.IncRef();
+               }
+               
+               public override IndexReader Reopen()
+               {
+                       EnsureOpen();
+                       IndexReader r = in_Renamed.Reopen();
+                       if (r != in_Renamed)
+                               return new DirectoryOwningReader(r, ref_Renamed);
+                       return this;
+               }
+               
+               public override IndexReader Reopen(bool openReadOnly)
+               {
+                       EnsureOpen();
+                       IndexReader r = in_Renamed.Reopen(openReadOnly);
+                       if (r != in_Renamed)
+                               return new DirectoryOwningReader(r, ref_Renamed);
+                       return this;
+               }
+               
+               public override IndexReader Reopen(IndexCommit commit)
+               {
+                       EnsureOpen();
+                       IndexReader r = in_Renamed.Reopen(commit);
+                       if (r != in_Renamed)
+                               return new DirectoryOwningReader(r, ref_Renamed);
+                       return this;
+               }
+               
+               public override System.Object Clone()
+               {
+                       EnsureOpen();
+                       return new DirectoryOwningReader((IndexReader) in_Renamed.Clone(), ref_Renamed);
+               }
+               
+               public override IndexReader Clone(bool openReadOnly)
+               {
+                       EnsureOpen();
+                       return new DirectoryOwningReader(in_Renamed.Clone(openReadOnly), ref_Renamed);
+               }
+               
+               protected internal override void  DoClose()
+               {
+                       System.IO.IOException ioe = null;
+                       // close the reader, record exception
+                       try
+                       {
+                               base.DoClose();
+                       }
+                       catch (System.IO.IOException e)
+                       {
+                               ioe = e;
+                       }
+                       // close the directory, record exception
+                       if (ref_Renamed.DecRef() == 0)
+                       {
+                               try
+                               {
+                                       in_Renamed.Directory().Close();
+                               }
+                               catch (System.IO.IOException e)
+                               {
+                                       if (ioe == null)
+                                               ioe = e;
+                               }
+                       }
+                       // throw the first exception
+                       if (ioe != null)
+                               throw ioe;
+               }
+               
+               /// <summary> This member holds the ref counter that is passed to each instance after cloning/reopening,
+               /// and is shared by all DirectoryOwningReader instances derived from the original one.
+               /// It reuses the class {@link SegmentReader.Ref}.
+               /// </summary>
+               private SegmentReader.Ref ref_Renamed;
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DirectoryReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DirectoryReader.cs
new file mode 100644 (file)
index 0000000..fcb8c7e
--- /dev/null
@@ -0,0 +1,1567 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Document = Mono.Lucene.Net.Documents.Document;
+using FieldSelector = Mono.Lucene.Net.Documents.FieldSelector;
+using AlreadyClosedException = Mono.Lucene.Net.Store.AlreadyClosedException;
+using Directory = Mono.Lucene.Net.Store.Directory;
+using Lock = Mono.Lucene.Net.Store.Lock;
+using LockObtainFailedException = Mono.Lucene.Net.Store.LockObtainFailedException;
+using DefaultSimilarity = Mono.Lucene.Net.Search.DefaultSimilarity;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> An IndexReader which reads indexes with multiple segments.</summary>
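+       /// <remarks>Instances are normally obtained through IndexReader.Open
+       /// rather than constructed directly; the constructors below are internal.
+       /// </remarks>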
+       public class DirectoryReader:IndexReader, System.ICloneable
+       {
+               /*new*/ private class AnonymousClassFindSegmentsFile:SegmentInfos.FindSegmentsFile
+               {
+                       private void  InitBlock(bool readOnly, Mono.Lucene.Net.Index.IndexDeletionPolicy deletionPolicy, int termInfosIndexDivisor)
+                       {
+                               this.readOnly = readOnly;
+                               this.deletionPolicy = deletionPolicy;
+                               this.termInfosIndexDivisor = termInfosIndexDivisor;
+                       }
+                       private bool readOnly;
+                       private Mono.Lucene.Net.Index.IndexDeletionPolicy deletionPolicy;
+                       private int termInfosIndexDivisor;
+                       internal AnonymousClassFindSegmentsFile(bool readOnly, Mono.Lucene.Net.Index.IndexDeletionPolicy deletionPolicy, int termInfosIndexDivisor, Mono.Lucene.Net.Store.Directory Param1):base(Param1)
+                       {
+                               InitBlock(readOnly, deletionPolicy, termInfosIndexDivisor);
+                       }
+                       public /*protected internal*/ override System.Object DoBody(System.String segmentFileName)
+                       {
+                               SegmentInfos infos = new SegmentInfos();
+                               infos.Read(directory, segmentFileName);
+                               if (readOnly)
+                                       return new ReadOnlyDirectoryReader(directory, infos, deletionPolicy, termInfosIndexDivisor);
+                               else
+                                       return new DirectoryReader(directory, infos, deletionPolicy, false, termInfosIndexDivisor);
+                       }
+               }
+               private class AnonymousClassFindSegmentsFile1:SegmentInfos.FindSegmentsFile
+               {
+                       private void  InitBlock(bool openReadOnly, DirectoryReader enclosingInstance)
+                       {
+                               this.openReadOnly = openReadOnly;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private bool openReadOnly;
+                       private DirectoryReader enclosingInstance;
+                       public DirectoryReader Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal AnonymousClassFindSegmentsFile1(bool openReadOnly, DirectoryReader enclosingInstance, Mono.Lucene.Net.Store.Directory Param1):base(Param1)
+                       {
+                               InitBlock(openReadOnly, enclosingInstance);
+                       }
+                       public /*protected internal*/ override System.Object DoBody(System.String segmentFileName)
+                       {
+                               SegmentInfos infos = new SegmentInfos();
+                               infos.Read(directory, segmentFileName);
+                               return Enclosing_Instance.DoReopen(infos, false, openReadOnly);
+                       }
+               }
+               protected internal Directory directory;
+               protected internal bool readOnly;
+               
+               internal IndexWriter writer;
+               
+               private IndexDeletionPolicy deletionPolicy;
+        private System.Collections.Generic.Dictionary<string, string> synced = new System.Collections.Generic.Dictionary<string, string>();
+               private Lock writeLock;
+               private SegmentInfos segmentInfos;
+               private SegmentInfos segmentInfosStart;
+               private bool stale;
+               private int termInfosIndexDivisor;
+               
+               private bool rollbackHasChanges;
+                               
+               private SegmentReader[] subReaders;
+               private int[] starts; // 1st docno for each segment
+               private System.Collections.IDictionary normsCache = new System.Collections.Hashtable();
+               private int maxDoc = 0;
+               private int numDocs = -1;
+               private bool hasDeletions = false;
+        
+        // Max version in index as of when we opened; this can be
+        // > our current segmentInfos version in case we were
+        // opened on a past IndexCommit:
+        private long maxIndexVersion;
+               
+               internal static IndexReader Open(Directory directory, IndexDeletionPolicy deletionPolicy, IndexCommit commit, bool readOnly, int termInfosIndexDivisor)
+               {
+                       return (IndexReader) new AnonymousClassFindSegmentsFile(readOnly, deletionPolicy, termInfosIndexDivisor, directory).Run(commit);
+               }
+               
+               /// <summary>Construct reading the named set of readers. </summary>
+               internal DirectoryReader(Directory directory, SegmentInfos sis, IndexDeletionPolicy deletionPolicy, bool readOnly, int termInfosIndexDivisor)
+               {
+                       this.directory = directory;
+                       this.readOnly = readOnly;
+                       this.segmentInfos = sis;
+                       this.deletionPolicy = deletionPolicy;
+                       this.termInfosIndexDivisor = termInfosIndexDivisor;
+                       
+                       if (!readOnly)
+                       {
+                               // We assume that this segments_N was previously
+                               // properly sync'd:
+                               SupportClass.CollectionsHelper.AddAllIfNotContains(synced, sis.Files(directory, true));
+                       }
+                       
+                       // To reduce the chance of hitting FileNotFound
+                       // (and having to retry), we open segments in
+                       // reverse because IndexWriter merges & deletes
+                       // the newest segments first.
+                       
+                       SegmentReader[] readers = new SegmentReader[sis.Count];
+                       for (int i = sis.Count - 1; i >= 0; i--)
+                       {
+                               bool success = false;
+                               try
+                               {
+                                       readers[i] = SegmentReader.Get(readOnly, sis.Info(i), termInfosIndexDivisor);
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       if (!success)
+                                       {
+                                               // Close all readers we had opened:
+                                               for (i++; i < sis.Count; i++)
+                                               {
+                                                       try
+                                                       {
+                                                               readers[i].Close();
+                                                       }
+                                                       catch (System.Exception)
+                                                       {
+                                                               // keep going - we want to clean up as much as possible
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       Initialize(readers);
+               }
+               
+               // Used by near real-time search
+               internal DirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor)
+               {
+                       this.directory = writer.GetDirectory();
+                       this.readOnly = true;
+                       segmentInfos = infos;
+                       segmentInfosStart = (SegmentInfos) infos.Clone();
+                       this.termInfosIndexDivisor = termInfosIndexDivisor;
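+                       // NOTE: readOnly was set to true just above, so this branch never
+                       // runs here; it is kept in step with the other constructors.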
+                       if (!readOnly)
+                       {
+                               // We assume that this segments_N was previously
+                               // properly sync'd:
+                               SupportClass.CollectionsHelper.AddAllIfNotContains(synced, infos.Files(directory, true));
+                       }
+                       
+                       // IndexWriter synchronizes externally before calling
+                       // us, which ensures infos will not change; so there's
+                       // no need to process segments in reverse order
+                       int numSegments = infos.Count;
+                       SegmentReader[] readers = new SegmentReader[numSegments];
+                       Directory dir = writer.GetDirectory();
+                       int upto = 0;
+                       
+                       for (int i = 0; i < numSegments; i++)
+                       {
+                               bool success = false;
+                               try
+                               {
+                                       SegmentInfo info = infos.Info(i);
+                                       if (info.dir == dir)
+                                       {
+                                               readers[upto++] = writer.readerPool.GetReadOnlyClone(info, true, termInfosIndexDivisor);
+                                       }
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       if (!success)
+                                       {
+                                               // Close all readers we had opened:
+                                               for (upto--; upto >= 0; upto--)
+                                               {
+                                                       try
+                                                       {
+                                                               readers[upto].Close();
+                                                       }
+                                                       catch (System.Exception)
+                                                       {
+                                                               // keep going - we want to clean up as much as possible
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       this.writer = writer;
+                       
+                       if (upto < readers.Length)
+                       {
+                               // This means some segments were in a foreign Directory
+                               SegmentReader[] newReaders = new SegmentReader[upto];
+                               Array.Copy(readers, 0, newReaders, 0, upto);
+                               readers = newReaders;
+                       }
+                       
+                       Initialize(readers);
+               }
+               
+               /// <summary>This constructor is only used for {@link #Reopen()} </summary>
+               internal DirectoryReader(Directory directory, SegmentInfos infos, SegmentReader[] oldReaders, int[] oldStarts, System.Collections.IDictionary oldNormsCache, bool readOnly, bool doClone, int termInfosIndexDivisor)
+               {
+                       this.directory = directory;
+                       this.readOnly = readOnly;
+                       this.segmentInfos = infos;
+                       this.termInfosIndexDivisor = termInfosIndexDivisor;
+                       if (!readOnly)
+                       {
+                               // We assume that this segments_N was previously
+                               // properly sync'd:
+                               SupportClass.CollectionsHelper.AddAllIfNotContains(synced, infos.Files(directory, true));
+                       }
+                       
+                       // we put the old SegmentReaders in a map that allows us
+                       // to look up a reader by its segment name
+                       System.Collections.IDictionary segmentReaders = new System.Collections.Hashtable();
+                       
+                       if (oldReaders != null)
+                       {
+                               // create a Map SegmentName->SegmentReader
+                               for (int i = 0; i < oldReaders.Length; i++)
+                               {
+                                       segmentReaders[oldReaders[i].GetSegmentName()] = (System.Int32) i;
+                               }
+                       }
+                       
+                       SegmentReader[] newReaders = new SegmentReader[infos.Count];
+                       
+                       // remember which readers are shared between the old and the re-opened
+                       // DirectoryReader - we have to incRef those readers
+                       bool[] readerShared = new bool[infos.Count];
+                       
+                       for (int i = infos.Count - 1; i >= 0; i--)
+                       {
+                               // find SegmentReader for this segment
+                int? oldReaderIndex = (int?)segmentReaders[infos.Info(i).name];
+                if (!oldReaderIndex.HasValue)
+                {
+                    // this is a new segment, no old SegmentReader can be reused
+                    newReaders[i] = null;
+                }
+                else
+                {
+                    // there is an old reader for this segment - we'll try to reopen it
+                    newReaders[i] = oldReaders[oldReaderIndex.Value];
+                }
+                               
+                               bool success = false;
+                               try
+                               {
+                                       SegmentReader newReader;
+                                       if (newReaders[i] == null || infos.Info(i).GetUseCompoundFile() != newReaders[i].GetSegmentInfo().GetUseCompoundFile())
+                                       {
+                                               
+                                               // We should never see a totally new segment during cloning
+                                               System.Diagnostics.Debug.Assert(!doClone);
+                                               
+                                               // this is a new reader; in case we hit an exception we can close it safely
+                                               newReader = SegmentReader.Get(readOnly, infos.Info(i), termInfosIndexDivisor);
+                                       }
+                                       else
+                                       {
+                                               newReader = newReaders[i].ReopenSegment(infos.Info(i), doClone, readOnly);
+                                       }
+                                       if (newReader == newReaders[i])
+                                       {
+                                               // this reader will be shared between the old and the new one,
+                                               // so we must incRef it
+                                               readerShared[i] = true;
+                                               newReader.IncRef();
+                                       }
+                                       else
+                                       {
+                                               readerShared[i] = false;
+                                               newReaders[i] = newReader;
+                                       }
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       if (!success)
+                                       {
+                                               for (i++; i < infos.Count; i++)
+                                               {
+                                                       if (newReaders[i] != null)
+                                                       {
+                                                               try
+                                                               {
+                                                                       if (!readerShared[i])
+                                                                       {
+                                                                               // this is a new subReader that is not used by the old one,
+                                                                               // we can close it
+                                                                               newReaders[i].Close();
+                                                                       }
+                                                                       else
+                                                                       {
+                                                                               // this subReader is also used by the old reader, so instead
+                                                                               // of closing it we must decRef it
+                                                                               newReaders[i].DecRef();
+                                                                       }
+                                                               }
+                                                               catch (System.IO.IOException)
+                                                               {
+                                                                       // keep going - we want to clean up as much as possible
+                                                               }
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       // initialize the readers to calculate maxDoc before we try to reuse the old normsCache
+                       Initialize(newReaders);
+                       
+                       // try to copy unchanged norms from the old normsCache to the new one
+                       if (oldNormsCache != null)
+                       {
+                               System.Collections.IEnumerator it = new System.Collections.Hashtable(oldNormsCache).GetEnumerator();
+                               while (it.MoveNext())
+                               {
+                                       System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
+                                       System.String field = (System.String) entry.Key;
+                                       if (!HasNorms(field))
+                                       {
+                                               continue;
+                                       }
+                                       
+                                       byte[] oldBytes = (byte[]) entry.Value;
+                                       
+                                       byte[] bytes = new byte[MaxDoc()];
+                                       
+                                       for (int i = 0; i < subReaders.Length; i++)
+                                       {
+                        int? oldReaderIndex = (int?)segmentReaders[subReaders[i].GetSegmentName()];
+
+                        // if this SegmentReader was not re-opened, we can copy all of its norms
+                        if (oldReaderIndex.HasValue &&
+                             (oldReaders[oldReaderIndex.Value] == subReaders[i]
+                               || oldReaders[oldReaderIndex.Value].norms[field] == subReaders[i].norms[field]))
+                        {
+                            // we don't have to synchronize here: either this constructor is called from a SegmentReader,
+                            // in which case no old norms cache is present, or it is called from MultiReader.reopen(),
+                            // which is synchronized
+                            Array.Copy(oldBytes, oldStarts[oldReaderIndex.Value], bytes, starts[i], starts[i + 1] - starts[i]);
+                        }
+                        else
+                        {
+                            subReaders[i].Norms(field, bytes, starts[i]);
+                        }
+                                       }
+                                       
+                                       normsCache[field] = bytes; // update cache
+                               }
+                       }
+               }
+               
+               private void  Initialize(SegmentReader[] subReaders)
+               {
+                       this.subReaders = subReaders;
+                       starts = new int[subReaders.Length + 1]; // build starts array
+                       for (int i = 0; i < subReaders.Length; i++)
+                       {
+                               starts[i] = maxDoc;
+                               maxDoc += subReaders[i].MaxDoc(); // compute maxDocs
+                               
+                               if (subReaders[i].HasDeletions())
+                                       hasDeletions = true;
+                       }
+                       starts[subReaders.Length] = maxDoc;
+
+            if (!readOnly)
+            {
+                maxIndexVersion = SegmentInfos.ReadCurrentVersion(directory);
+            }
+               }
+               
+               public override System.Object Clone()
+               {
+            lock (this)
+            {
+                try
+                {
+                    return Clone(readOnly); // Preserve current readOnly
+                }
+                catch (System.Exception ex)
+                {
+                    throw new System.SystemException(ex.Message, ex);
+                }
+            }
+               }
+               
+               public override IndexReader Clone(bool openReadOnly)
+               {
+                       lock (this)
+                       {
+                               DirectoryReader newReader = DoReopen((SegmentInfos) segmentInfos.Clone(), true, openReadOnly);
+                               
+                               if (this != newReader)
+                               {
+                                       newReader.deletionPolicy = deletionPolicy;
+                               }
+                               newReader.writer = writer;
+                               // If we're cloning a non-readOnly reader, move the
+                               // writeLock (if there is one) to the new reader:
+                               if (!openReadOnly && writeLock != null)
+                               {
+                                       // In near real-time search, reader is always readonly
+                                       System.Diagnostics.Debug.Assert(writer == null);
+                                       newReader.writeLock = writeLock;
+                                       newReader.hasChanges = hasChanges;
+                                       newReader.hasDeletions = hasDeletions;
+                                       writeLock = null;
+                                       hasChanges = false;
+                               }
+                               
+                               return newReader;
+                       }
+               }
+               
+               public override IndexReader Reopen()
+               {
+                       // Preserve current readOnly
+                       return DoReopen(readOnly, null);
+               }
+               
+               public override IndexReader Reopen(bool openReadOnly)
+               {
+                       return DoReopen(openReadOnly, null);
+               }
+               
+               public override IndexReader Reopen(IndexCommit commit)
+               {
+                       return DoReopen(true, commit);
+               }
+
+        private IndexReader DoReopenFromWriter(bool openReadOnly, IndexCommit commit)
+        {
+            System.Diagnostics.Debug.Assert(readOnly);
+
+            if (!openReadOnly)
+            {
+                throw new System.ArgumentException("a reader obtained from IndexWriter.getReader() can only be reopened with openReadOnly=true (got false)");
+            }
+
+            if (commit != null)
+            {
+                throw new System.ArgumentException("a reader obtained from IndexWriter.getReader() cannot currently accept a commit");
+            }
+
+            // TODO: right now we *always* make a new reader; in
+            // the future we could have the writer make some effort to
+            // detect that no changes have occurred
+            return writer.GetReader();
+        }
+
+        internal virtual IndexReader DoReopen(bool openReadOnly, IndexCommit commit)
+        {
+            EnsureOpen();
+
+            System.Diagnostics.Debug.Assert(commit == null || openReadOnly);
+
+            // If we were obtained by writer.getReader(), re-ask the
+            // writer to get a new reader.
+            if (writer != null)
+            {
+                return DoReopenFromWriter(openReadOnly, commit);
+            }
+            else
+            {
+                return DoReopenNoWriter(openReadOnly, commit);
+            }
+        }
+                
+        private IndexReader DoReopenNoWriter(bool openReadOnly, IndexCommit commit)
+        {
+            lock (this)
+            {
+                if (commit == null)
+                {
+                    if (hasChanges)
+                    {
+                        // We have changes, which means we are not readOnly:
+                        System.Diagnostics.Debug.Assert(readOnly == false);
+                        // and we hold the write lock:
+                        System.Diagnostics.Debug.Assert(writeLock != null);
+                        // so no other writer holds the write lock, which
+                        // means no changes could have been done to the index:
+                        System.Diagnostics.Debug.Assert(IsCurrent());
+
+                        if (openReadOnly)
+                        {
+                            return (IndexReader)Clone(openReadOnly);
+                        }
+                        else
+                        {
+                            return this;
+                        }
+                    }
+                    else if (IsCurrent())
+                    {
+                        if (openReadOnly != readOnly)
+                        {
+                            // Just fall back to clone
+                            return (IndexReader)Clone(openReadOnly);
+                        }
+                        else
+                        {
+                            return this;
+                        }
+                    }
+                }
+                else
+                {
+                    if (directory != commit.GetDirectory())
+                        throw new System.IO.IOException("the specified commit does not match the specified Directory");
+                    if (segmentInfos != null && commit.GetSegmentsFileName().Equals(segmentInfos.GetCurrentSegmentFileName()))
+                    {
+                        if (readOnly != openReadOnly)
+                        {
+                            // Just fall back to clone
+                            return (IndexReader)Clone(openReadOnly);
+                        }
+                        else
+                        {
+                            return this;
+                        }
+                    }
+                }
+
+                return (IndexReader)new AnonymousFindSegmentsFile(directory, openReadOnly, this).Run(commit);
+            }
+        }
+
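+        // Retry wrapper used by DoReopenNoWriter: re-reads the named segments_N
+        // file and reopens this reader against it.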
+        class AnonymousFindSegmentsFile : SegmentInfos.FindSegmentsFile
+        {
+            DirectoryReader enclosingInstance;
+            bool openReadOnly;
+            Directory dir;
+            public AnonymousFindSegmentsFile(Directory directory, bool openReadOnly, DirectoryReader dirReader) : base(directory)
+            {
+                this.dir = directory;
+                this.openReadOnly = openReadOnly;
+                enclosingInstance = dirReader;
+            }
+
+            public override object DoBody(string segmentFileName)
+            {
+                SegmentInfos infos = new SegmentInfos();
+                infos.Read(this.dir, segmentFileName);
+                return enclosingInstance.DoReopen(infos, false, openReadOnly);
+            }
+        }
+
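+        // Builds the actual reopened reader; unchanged SegmentReaders are shared
+        // with (and incRef'd by) the constructor that takes the old readers.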
+        private DirectoryReader DoReopen(SegmentInfos infos, bool doClone, bool openReadOnly)
+        {
+            lock (this)
+            {
+                DirectoryReader reader;
+                if (openReadOnly)
+                {
+                    reader = new ReadOnlyDirectoryReader(directory, infos, subReaders, starts, normsCache, doClone, termInfosIndexDivisor);
+                }
+                else
+                {
+                    reader = new DirectoryReader(directory, infos, subReaders, starts, normsCache, false, doClone, termInfosIndexDivisor);
+                }
+                reader.SetDisableFakeNorms(GetDisableFakeNorms());
+                return reader;
+            }
+        }
+
+               /// <summary>Version number when this IndexReader was opened. </summary>
+               public override long GetVersion()
+               {
+                       EnsureOpen();
+                       return segmentInfos.GetVersion();
+               }
+               
+               public override TermFreqVector[] GetTermFreqVectors(int n)
+               {
+                       EnsureOpen();
+                       int i = ReaderIndex(n); // find segment num
+                       return subReaders[i].GetTermFreqVectors(n - starts[i]); // dispatch to segment
+               }
+               
+               public override TermFreqVector GetTermFreqVector(int n, System.String field)
+               {
+                       EnsureOpen();
+                       int i = ReaderIndex(n); // find segment num
+                       return subReaders[i].GetTermFreqVector(n - starts[i], field);
+               }
+               
+               
+               public override void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
+               {
+                       EnsureOpen();
+                       int i = ReaderIndex(docNumber); // find segment num
+                       subReaders[i].GetTermFreqVector(docNumber - starts[i], field, mapper);
+               }
+               
+               public override void  GetTermFreqVector(int docNumber, TermVectorMapper mapper)
+               {
+                       EnsureOpen();
+                       int i = ReaderIndex(docNumber); // find segment num
+                       subReaders[i].GetTermFreqVector(docNumber - starts[i], mapper);
+               }
+               
+               /// <summary> Checks whether the index is optimized (i.e. it has a single segment and no deletions)</summary>
+               /// <returns> <code>true</code> if the index is optimized; <code>false</code> otherwise
+               /// </returns>
+               public override bool IsOptimized()
+               {
+                       EnsureOpen();
+                       return segmentInfos.Count == 1 && !HasDeletions();
+               }
+               
+               public override int NumDocs()
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+            // NOTE: multiple threads may wind up init'ing
+            // numDocs... but that's harmless
+                       if (numDocs == - 1)
+                       {
+                               // check cache
+                               int n = 0; // cache miss--recompute
+                               for (int i = 0; i < subReaders.Length; i++)
+                                       n += subReaders[i].NumDocs(); // sum from readers
+                               numDocs = n;
+                       }
+                       return numDocs;
+               }
+               
+               public override int MaxDoc()
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       return maxDoc;
+               }
+               
+               // inherit doc comment
+               public override Document Document(int n, FieldSelector fieldSelector)
+               {
+                       EnsureOpen();
+                       int i = ReaderIndex(n); // find segment num
+                       return subReaders[i].Document(n - starts[i], fieldSelector); // dispatch to segment reader
+               }
+               
+               public override bool IsDeleted(int n)
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       int i = ReaderIndex(n); // find segment num
+                       return subReaders[i].IsDeleted(n - starts[i]); // dispatch to segment reader
+               }
+               
+               public override bool HasDeletions()
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       return hasDeletions;
+               }
+               
+               protected internal override void  DoDelete(int n)
+               {
+                       numDocs = - 1; // invalidate cache
+                       int i = ReaderIndex(n); // find segment num
+                       subReaders[i].DeleteDocument(n - starts[i]); // dispatch to segment reader
+                       hasDeletions = true;
+               }
+               
+               protected internal override void  DoUndeleteAll()
+               {
+                       for (int i = 0; i < subReaders.Length; i++)
+                               subReaders[i].UndeleteAll();
+                       
+                       hasDeletions = false;
+                       numDocs = - 1; // invalidate cache
+               }
+               
+               private int ReaderIndex(int n)
+               {
+                       // find reader for doc n:
+                       return ReaderIndex(n, this.starts, this.subReaders.Length);
+               }
+               
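+               /// <summary>Binary-searches the starts array for the sub-reader containing
+               /// document n, i.e. the last index i with starts[i] &lt;= n; e.g. with
+               /// starts = {0, 10, 25} and n = 12 this returns 1.</summary>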
+               internal static int ReaderIndex(int n, int[] starts, int numSubReaders)
+               {
+                       // find reader for doc n:
+                       int lo = 0; // search starts array
+                       int hi = numSubReaders - 1; // for first element less
+                       
+                       while (hi >= lo)
+                       {
+                               int mid = SupportClass.Number.URShift((lo + hi), 1);
+                               int midValue = starts[mid];
+                               if (n < midValue)
+                                       hi = mid - 1;
+                               else if (n > midValue)
+                                       lo = mid + 1;
+                               else
+                               {
+                                       // found a match
+                                       while (mid + 1 < numSubReaders && starts[mid + 1] == midValue)
+                                       {
+                                               mid++; // scan to last match
+                                       }
+                                       return mid;
+                               }
+                       }
+                       return hi;
+               }
+               
+               public override bool HasNorms(System.String field)
+               {
+                       EnsureOpen();
+                       for (int i = 0; i < subReaders.Length; i++)
+                       {
+                               if (subReaders[i].HasNorms(field))
+                                       return true;
+                       }
+                       return false;
+               }
+               
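+               // Lazily-created array of default norms, returned when a field has no
+               // norms stored and fake norms have not been disabled.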
+               private byte[] ones;
+               private byte[] FakeNorms()
+               {
+                       if (ones == null)
+                               ones = SegmentReader.CreateFakeNorms(MaxDoc());
+                       return ones;
+               }
+               
+               public override byte[] Norms(System.String field)
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               byte[] bytes = (byte[]) normsCache[field];
+                               if (bytes != null)
+                                       return bytes; // cache hit
+                               if (!HasNorms(field))
+                                       return GetDisableFakeNorms() ? null : FakeNorms();
+                               
+                               bytes = new byte[MaxDoc()];
+                               for (int i = 0; i < subReaders.Length; i++)
+                                       subReaders[i].Norms(field, bytes, starts[i]);
+                               normsCache[field] = bytes; // update cache
+                               return bytes;
+                       }
+               }
+               
+               public override void  Norms(System.String field, byte[] result, int offset)
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               byte[] bytes = (byte[]) normsCache[field];
+                               if (bytes == null && !HasNorms(field))
+                               {
+                                       byte val = DefaultSimilarity.EncodeNorm(1.0f);
+                                       for (int index = offset; index < result.Length; index++)
+                                               result[index] = val;
+                               }
+                               else if (bytes != null)
+                               {
+                                       // cache hit
+                                       Array.Copy(bytes, 0, result, offset, MaxDoc());
+                               }
+                               else
+                               {
+                                       for (int i = 0; i < subReaders.Length; i++)
+                                       {
+                                               // read from segments
+                                               subReaders[i].Norms(field, result, offset + starts[i]);
+                                       }
+                               }
+                       }
+               }
+               
+               protected internal override void  DoSetNorm(int n, System.String field, byte value_Renamed)
+               {
+                       lock (normsCache.SyncRoot)
+                       {
+                               normsCache.Remove(field); // clear cache      
+                       }
+                       int i = ReaderIndex(n); // find segment num
+                       subReaders[i].SetNorm(n - starts[i], field, value_Renamed); // dispatch
+               }
+               
+               public override TermEnum Terms()
+               {
+                       EnsureOpen();
+                       return new MultiTermEnum(this, subReaders, starts, null);
+               }
+               
+               public override TermEnum Terms(Term term)
+               {
+                       EnsureOpen();
+                       return new MultiTermEnum(this, subReaders, starts, term);
+               }
+               
+               public override int DocFreq(Term t)
+               {
+                       EnsureOpen();
+                       int total = 0; // sum freqs in segments
+                       for (int i = 0; i < subReaders.Length; i++)
+                               total += subReaders[i].DocFreq(t);
+                       return total;
+               }
+               
+               public override TermDocs TermDocs()
+               {
+                       EnsureOpen();
+                       return new MultiTermDocs(this, subReaders, starts);
+               }
+               
+               public override TermPositions TermPositions()
+               {
+                       EnsureOpen();
+                       return new MultiTermPositions(this, subReaders, starts);
+               }
+               
+               /// <summary> Tries to acquire the WriteLock on this directory. This method is only valid if this
+               /// IndexReader is the directory owner.
+               /// </summary>
+               /// <throws>  StaleReaderException  if the index has changed since this reader was opened </throws>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  Mono.Lucene.Net.Store.LockObtainFailedException if another writer has this index open
+               /// (<code>write.lock</code> could not be obtained) </throws>
+               /// <throws>  IOException           if there is a low-level IO error </throws>
+               protected internal override void  AcquireWriteLock()
+               {
+                       
+                       if (readOnly)
+                       {
+                               // NOTE: we should not reach this code w/ the core
+                               // IndexReader classes; however, an external subclass
+                               // of IndexReader could reach this.
+                               ReadOnlySegmentReader.NoWrite();
+                       }
+                       
+                       if (segmentInfos != null)
+                       {
+                               EnsureOpen();
+                               if (stale)
+                                       throw new StaleReaderException("IndexReader out of date and no longer valid for delete, undelete, or setNorm operations");
+                               
+                               if (this.writeLock == null)
+                               {
+                                       Lock writeLock = directory.MakeLock(IndexWriter.WRITE_LOCK_NAME);
+                                       if (!writeLock.Obtain(IndexWriter.WRITE_LOCK_TIMEOUT))
+                                       // obtain write lock
+                                       {
+                                               throw new LockObtainFailedException("Index locked for write: " + writeLock);
+                                       }
+                                       this.writeLock = writeLock;
+                                       
+                    // We have to check whether the index has changed since this reader
+                    // was opened; if so, this reader is no longer valid for deletion.
+                    if (SegmentInfos.ReadCurrentVersion(directory) > maxIndexVersion)
+                                       {
+                                               stale = true;
+                                               this.writeLock.Release();
+                                               this.writeLock = null;
+                                               throw new StaleReaderException("IndexReader out of date and no longer valid for delete, undelete, or setNorm operations");
+                                       }
+                               }
+                       }
+               }
+               
+               /// <deprecated/>
+               [Obsolete]
+               protected internal override void  DoCommit()
+               {
+                       DoCommit(null);
+               }
+               
+               /// <summary> Commit changes resulting from delete, undeleteAll, or setNorm operations
+               /// <p/>
+               /// If an exception is hit, then either no changes or all changes will have been committed to the index (transactional
+               /// semantics).
+               /// 
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+        protected internal override void DoCommit(System.Collections.Generic.IDictionary<string, string> commitUserData)
+               {
+                       if (hasChanges)
+                       {
+                               segmentInfos.SetUserData(commitUserData);
+                               // Default deleter (for backwards compatibility) is
+                               // KeepOnlyLastCommitDeleter:
+                               IndexFileDeleter deleter = new IndexFileDeleter(directory, deletionPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : deletionPolicy, segmentInfos, null, null, synced);
+
+                segmentInfos.UpdateGeneration(deleter.GetLastSegmentInfos());
+
+                               // Checkpoint the state we are about to change, in
+                               // case we have to roll back:
+                               StartCommit();
+                               
+                               bool success = false;
+                               try
+                               {
+                                       for (int i = 0; i < subReaders.Length; i++)
+                                               subReaders[i].Commit();
+
+                    // Sync all files we just wrote
+                    foreach(string fileName in segmentInfos.Files(directory, false))
+                    {
+                        if(!synced.ContainsKey(fileName))
+                        {
+                            System.Diagnostics.Debug.Assert(directory.FileExists(fileName));
+                                                       directory.Sync(fileName);
+                            synced[fileName]=fileName;
+                        }   
+                    }
+                                       
+                                       segmentInfos.Commit(directory);
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       
+                                       if (!success)
+                                       {
+                                               
+                                               // Rollback changes that were made to
+                                               // SegmentInfos but failed to get [fully]
+                                               // committed.  This way this reader instance
+                                               // remains consistent (matched to what's
+                                               // actually in the index):
+                                               RollbackCommit();
+                                               
+                                               // Recompute deletable files & remove them (so
+                                               // partially written .del files, etc, are
+                                               // removed):
+                                               deleter.Refresh();
+                                       }
+                               }
+                               
+                               // Have the deleter remove any now unreferenced
+                               // files due to this commit:
+                               deleter.Checkpoint(segmentInfos, true);
+                               deleter.Close();
+
+                maxIndexVersion = segmentInfos.GetVersion();
+                               
+                               if (writeLock != null)
+                               {
+                                       writeLock.Release(); // release write lock
+                                       writeLock = null;
+                               }
+                       }
+                       hasChanges = false;
+               }
+               
+               internal virtual void  StartCommit()
+               {
+                       rollbackHasChanges = hasChanges;
+                       for (int i = 0; i < subReaders.Length; i++)
+                       {
+                               subReaders[i].StartCommit();
+                       }
+               }
+               
+               internal virtual void  RollbackCommit()
+               {
+            hasChanges = rollbackHasChanges;
+            for (int i = 0; i < subReaders.Length; i++)
+            {
+                subReaders[i].RollbackCommit();
+            }
+               }
+
+        public override System.Collections.Generic.IDictionary<string, string> GetCommitUserData()
+               {
+                       EnsureOpen();
+                       return segmentInfos.GetUserData();
+               }
+               
+               public override bool IsCurrent()
+               {
+                       EnsureOpen();
+                       if (writer == null || writer.IsClosed())
+                       {
+                               // we loaded SegmentInfos from the directory
+                               return SegmentInfos.ReadCurrentVersion(directory) == segmentInfos.GetVersion();
+                       }
+                       else
+                       {
+                               return writer.NrtIsCurrent(segmentInfosStart);
+                       }
+               }
+               
+               protected internal override void  DoClose()
+               {
+                       lock (this)
+                       {
+                               System.IO.IOException ioe = null;
+                               normsCache = null;
+                               for (int i = 0; i < subReaders.Length; i++)
+                               {
+                                       // try to close each reader, even if an exception is thrown
+                                       try
+                                       {
+                                               subReaders[i].DecRef();
+                                       }
+                                       catch (System.IO.IOException e)
+                                       {
+                                               if (ioe == null)
+                                                       ioe = e;
+                                       }
+                               }
+
+                // NOTE: only needed in case someone had asked for
+                // FieldCache for top-level reader (which is generally
+                // not a good idea):
+                Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.Purge(this);
+
+                               // throw the first exception
+                               if (ioe != null)
+                                       throw ioe;
+                       }
+               }
+
+        public override System.Collections.Generic.ICollection<string> GetFieldNames(IndexReader.FieldOption fieldNames)
+               {
+                       EnsureOpen();
+                       return GetFieldNames(fieldNames, this.subReaders);
+               }
+
+        internal static System.Collections.Generic.ICollection<string> GetFieldNames(IndexReader.FieldOption fieldNames, IndexReader[] subReaders)
+               {
+                       // maintain a unique set of field names
+            System.Collections.Generic.Dictionary<string,string> fieldSet = new System.Collections.Generic.Dictionary<string,string>();
+                       for (int i = 0; i < subReaders.Length; i++)
+                       {
+                               IndexReader reader = subReaders[i];
+                System.Collections.Generic.ICollection<string> names = reader.GetFieldNames(fieldNames);
+                               SupportClass.CollectionsHelper.AddAllIfNotContains(fieldSet, names);
+                       }
+                       return fieldSet.Keys;
+               }
+               
+               public override IndexReader[] GetSequentialSubReaders()
+               {
+                       return subReaders;
+               }
+
+        [Obsolete("Mono.Lucene.Net-2.9.1. This method overrides obsolete member Mono.Lucene.Net.Index.IndexReader.SetDisableFakeNorms(bool)")]
+               public override void  SetDisableFakeNorms(bool disableFakeNorms)
+               {
+                       base.SetDisableFakeNorms(disableFakeNorms);
+                       for (int i = 0; i < subReaders.Length; i++)
+                               subReaders[i].SetDisableFakeNorms(disableFakeNorms);
+               }
+               
+               /// <summary>Returns the directory this index resides in. </summary>
+               public override Directory Directory()
+               {
+                       // Don't ensureOpen here -- in certain cases, when a
+                       // cloned/reopened reader needs to commit, it may call
+                       // this method on the closed original reader
+                       return directory;
+               }
+               
+               public override int GetTermInfosIndexDivisor()
+               {
+                       return termInfosIndexDivisor;
+               }
+               
+               /// <summary> Expert: return the IndexCommit that this reader has opened.
+               /// <p/>
+               /// <p/><b>WARNING</b>: this API is new and experimental and may suddenly change.<p/>
+               /// </summary>
+               public override IndexCommit GetIndexCommit()
+               {
+                       return new ReaderCommit(segmentInfos, directory);
+               }
+               
+               /// <seealso cref="Mono.Lucene.Net.Index.IndexReader.listCommits">
+               /// </seealso>
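+               /// <summary>Returns all commit points in the directory: the current
+               /// segments_N plus any older generations that are still present.
+               /// A sketch of typical usage (assuming an open Directory dir):
+               /// <code>
+               /// foreach (IndexCommit c in DirectoryReader.ListCommits(dir))
+               ///     System.Console.WriteLine(c.GetSegmentsFileName());
+               /// </code>
+               /// </summary>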
+               public static new System.Collections.ICollection ListCommits(Directory dir)
+               {
+                       System.String[] files = dir.ListAll();
+                       
+                       System.Collections.ArrayList commits = new System.Collections.ArrayList();
+                       
+                       SegmentInfos latest = new SegmentInfos();
+                       latest.Read(dir);
+                       long currentGen = latest.GetGeneration();
+                       
+                       commits.Add(new ReaderCommit(latest, dir));
+                       
+                       for (int i = 0; i < files.Length; i++)
+                       {
+                               
+                               System.String fileName = files[i];
+                               
+                               if (fileName.StartsWith(IndexFileNames.SEGMENTS) && !fileName.Equals(IndexFileNames.SEGMENTS_GEN) && SegmentInfos.GenerationFromSegmentsFileName(fileName) < currentGen)
+                               {
+                                       
+                                       SegmentInfos sis = new SegmentInfos();
+                                       try
+                                       {
+                                               // an IOException is allowed to propagate here, in case
+                                               // segments_N is corrupt
+                                               sis.Read(dir, fileName);
+                                       }
+                                       catch (System.IO.FileNotFoundException)
+                                       {
+                                               // LUCENE-948: on NFS (and maybe others), if
+                                               // you have writers switching back and forth
+                                               // between machines, it's very likely that the
+                                               // dir listing will be stale and will claim a
+                                               // file segments_X exists when in fact it
+                                               // doesn't.  So, we catch this and handle it
+                                               // as if the file does not exist
+                                               sis = null;
+                                       }
+                                       
+                                       if (sis != null)
+                                               commits.Add(new ReaderCommit(sis, dir));
+                               }
+                       }
+                       
+                       return commits;
+               }
+               
+               private sealed class ReaderCommit:IndexCommit
+               {
+                       private System.String segmentsFileName;
+                       internal System.Collections.Generic.ICollection<string> files;
+                       internal Directory dir;
+                       internal long generation;
+                       internal long version;
+                       internal bool isOptimized;
+            internal System.Collections.Generic.IDictionary<string, string> userData;
+                       
+                       internal ReaderCommit(SegmentInfos infos, Directory dir)
+                       {
+                               segmentsFileName = infos.GetCurrentSegmentFileName();
+                               this.dir = dir;
+                               userData = infos.GetUserData();
+                files = infos.Files(dir, true);
+                               version = infos.GetVersion();
+                               generation = infos.GetGeneration();
+                               isOptimized = infos.Count == 1 && !infos.Info(0).HasDeletions();
+                       }
+            public override string ToString()
+            {
+                return "DirectoryReader.ReaderCommit(" + segmentsFileName + ")";
+            }
+
+                       public override bool IsOptimized()
+                       {
+                               return isOptimized;
+                       }
+                       
+                       public override System.String GetSegmentsFileName()
+                       {
+                               return segmentsFileName;
+                       }
+
+            public override System.Collections.Generic.ICollection<string> GetFileNames()
+                       {
+                               return files;
+                       }
+                       
+                       public override Directory GetDirectory()
+                       {
+                               return dir;
+                       }
+                       
+                       public override long GetVersion()
+                       {
+                               return version;
+                       }
+                       
+                       public override long GetGeneration()
+                       {
+                               return generation;
+                       }
+                       
+                       public override bool IsDeleted()
+                       {
+                               return false;
+                       }
+
+            public override System.Collections.Generic.IDictionary<string, string> GetUserData()
+                       {
+                               return userData;
+                       }
+
+            public override void Delete()
+            {
+                throw new System.NotSupportedException("This IndexCommit does not support deletions");
+            }
+               }
+               
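+               /// <summary>Merges the TermEnums of several sub-readers into one ordered
+               /// enumeration, using a SegmentMergeQueue (priority queue) to always
+               /// surface the smallest current term and to sum its docFreq.</summary>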
+               internal class MultiTermEnum:TermEnum
+               {
+                       internal IndexReader topReader; // used for matching TermEnum to TermDocs
+                       private SegmentMergeQueue queue;
+                       
+                       private Term term;
+                       private int docFreq;
+                       internal SegmentMergeInfo[] matchingSegments; // null terminated array of matching segments
+                       
+                       public MultiTermEnum(IndexReader topReader, IndexReader[] readers, int[] starts, Term t)
+                       {
+                               this.topReader = topReader;
+                               queue = new SegmentMergeQueue(readers.Length);
+                               matchingSegments = new SegmentMergeInfo[readers.Length + 1];
+                               for (int i = 0; i < readers.Length; i++)
+                               {
+                                       IndexReader reader = readers[i];
+                                       TermEnum termEnum;
+                                       
+                                       if (t != null)
+                                       {
+                                               termEnum = reader.Terms(t);
+                                       }
+                                       else
+                                               termEnum = reader.Terms();
+                                       
+                                       SegmentMergeInfo smi = new SegmentMergeInfo(starts[i], termEnum, reader);
+                                       smi.ord = i;
+                                       // initialize the queue: seed it with each enum that is
+                                       // positioned on a term; close segments with no match
+                                       if (t == null ? smi.Next() : termEnum.Term() != null)
+                                               queue.Put(smi);
+                                       else
+                                               smi.Close();
+                               }
+                               
+                               if (t != null && queue.Size() > 0)
+                               {
+                                       Next();
+                               }
+                       }
+                       
+                       public override bool Next()
+                       {
+                               for (int i = 0; i < matchingSegments.Length; i++)
+                               {
+                                       SegmentMergeInfo smi = matchingSegments[i];
+                                       if (smi == null)
+                                               break;
+                                       if (smi.Next())
+                                               queue.Put(smi);
+                                       else
+                                               smi.Close(); // done with segment
+                               }
+                               
+                               int numMatchingSegments = 0;
+                               matchingSegments[0] = null;
+                               
+                               SegmentMergeInfo top = (SegmentMergeInfo) queue.Top();
+                               
+                               if (top == null)
+                               {
+                                       term = null;
+                                       return false;
+                               }
+                               
+                               term = top.term;
+                               docFreq = 0;
+                               
+                               while (top != null && term.CompareTo(top.term) == 0)
+                               {
+                                       matchingSegments[numMatchingSegments++] = top;
+                                       queue.Pop();
+                                       docFreq += top.termEnum.DocFreq(); // increment freq
+                                       top = (SegmentMergeInfo) queue.Top();
+                               }
+                               
+                               matchingSegments[numMatchingSegments] = null;
+                               return true;
+                       }
+                       
+                       public override Term Term()
+                       {
+                               return term;
+                       }
+                       
+                       public override int DocFreq()
+                       {
+                               return docFreq;
+                       }
+                       
+                       public override void  Close()
+                       {
+                               queue.Close();
+                       }
+               }
+               
+               internal class MultiTermDocs : TermDocs
+               {
+                       internal IndexReader topReader; // used for matching TermEnum to TermDocs
+                       protected internal IndexReader[] readers;
+                       protected internal int[] starts;
+                       protected internal Term term;
+                       
+                       protected internal int base_Renamed = 0;
+                       protected internal int pointer = 0;
+                       
+                       private TermDocs[] readerTermDocs;
+                       protected internal TermDocs current; // == readerTermDocs[pointer]
+                       
+                       private MultiTermEnum tenum; // the term enum used for seeking... can be null
+                       internal int matchingSegmentPos; // position into the matching segments from tenum
+                       internal SegmentMergeInfo smi; // current segment merge info... can be null
+                       
+                       public MultiTermDocs(IndexReader topReader, IndexReader[] r, int[] s)
+                       {
+                               this.topReader = topReader;
+                               readers = r;
+                               starts = s;
+                               
+                               readerTermDocs = new TermDocs[r.Length];
+                       }
+                       
+                       public virtual int Doc()
+                       {
+                               return base_Renamed + current.Doc();
+                       }
+                       public virtual int Freq()
+                       {
+                               return current.Freq();
+                       }
+                       
+                       public virtual void  Seek(Term term)
+                       {
+                               this.term = term;
+                               this.base_Renamed = 0;
+                               this.pointer = 0;
+                               this.current = null;
+                               this.tenum = null;
+                               this.smi = null;
+                               this.matchingSegmentPos = 0;
+                       }
+                       
+                       public virtual void  Seek(TermEnum termEnum)
+                       {
+                               Seek(termEnum.Term());
+                               if (termEnum is MultiTermEnum)
+                               {
+                                       tenum = (MultiTermEnum) termEnum;
+                                       if (topReader != tenum.topReader)
+                                               tenum = null;
+                               }
+                       }
+                       
+                       public virtual bool Next()
+                       {
+                               for (; ; )
+                               {
+                                       if (current != null && current.Next())
+                                       {
+                                               return true;
+                                       }
+                                       else if (pointer < readers.Length)
+                                       {
+                                               if (tenum != null)
+                                               {
+                                                       smi = tenum.matchingSegments[matchingSegmentPos++];
+                                                       if (smi == null)
+                                                       {
+                                                               pointer = readers.Length;
+                                                               return false;
+                                                       }
+                                                       pointer = smi.ord;
+                                               }
+                                               base_Renamed = starts[pointer];
+                                               current = TermDocs(pointer++);
+                                       }
+                                       else
+                                       {
+                                               return false;
+                                       }
+                               }
+                       }
+                       
+                       /// <summary>Optimized bulk read: fills the docs/freqs arrays from the current segment and rebases doc numbers by the segment start. </summary>
+                       public virtual int Read(int[] docs, int[] freqs)
+                       {
+                               while (true)
+                               {
+                                       while (current == null)
+                                       {
+                                               if (pointer < readers.Length)
+                                               {
+                                                       // try next segment
+                                                       if (tenum != null)
+                                                       {
+                                                               smi = tenum.matchingSegments[matchingSegmentPos++];
+                                                               if (smi == null)
+                                                               {
+                                                                       pointer = readers.Length;
+                                                                       return 0;
+                                                               }
+                                                               pointer = smi.ord;
+                                                       }
+                                                       base_Renamed = starts[pointer];
+                                                       current = TermDocs(pointer++);
+                                               }
+                                               else
+                                               {
+                                                       return 0;
+                                               }
+                                       }
+                                       int end = current.Read(docs, freqs);
+                                       if (end == 0)
+                                       {
+                                               // none left in segment
+                                               current = null;
+                                       }
+                                       else
+                                       {
+                                               // got some
+                                               int b = base_Renamed; // adjust doc numbers
+                                               for (int i = 0; i < end; i++)
+                                                       docs[i] += b;
+                                               return end;
+                                       }
+                               }
+                       }
+                       
+                       /* A Possible future optimization could skip entire segments */
+                       public virtual bool SkipTo(int target)
+                       {
+                               for (; ; )
+                               {
+                                       if (current != null && current.SkipTo(target - base_Renamed))
+                                       {
+                                               return true;
+                                       }
+                                       else if (pointer < readers.Length)
+                                       {
+                                               if (tenum != null)
+                                               {
+                                                       SegmentMergeInfo smi = tenum.matchingSegments[matchingSegmentPos++];
+                                                       if (smi == null)
+                                                       {
+                                                               pointer = readers.Length;
+                                                               return false;
+                                                       }
+                                                       pointer = smi.ord;
+                                               }
+                                               base_Renamed = starts[pointer];
+                                               current = TermDocs(pointer++);
+                                       }
+                                       else
+                                               return false;
+                               }
+                       }
+                       
+                       private TermDocs TermDocs(int i)
+                       {
+                               TermDocs result = readerTermDocs[i];
+                               if (result == null)
+                                       result = readerTermDocs[i] = TermDocs(readers[i]);
+                               if (smi != null)
+                               {
+                                       System.Diagnostics.Debug.Assert((smi.ord == i));
+                                       System.Diagnostics.Debug.Assert((smi.termEnum.Term().Equals(term)));
+                                       result.Seek(smi.termEnum);
+                               }
+                               else
+                               {
+                                       result.Seek(term);
+                               }
+                               return result;
+                       }
+                       
+                       protected internal virtual TermDocs TermDocs(IndexReader reader)
+                       {
+                               return term == null ? reader.TermDocs(null) : reader.TermDocs();
+                       }
+                       
+                       public virtual void  Close()
+                       {
+                               for (int i = 0; i < readerTermDocs.Length; i++)
+                               {
+                                       if (readerTermDocs[i] != null)
+                                               readerTermDocs[i].Close();
+                               }
+                       }
+               }
+               
+               internal class MultiTermPositions:MultiTermDocs, TermPositions
+               {
+                       public MultiTermPositions(IndexReader topReader, IndexReader[] r, int[] s):base(topReader, r, s)
+                       {
+                       }
+                       
+                       protected internal override TermDocs TermDocs(IndexReader reader)
+                       {
+                               return (TermDocs) reader.TermPositions();
+                       }
+                       
+                       public virtual int NextPosition()
+                       {
+                               return ((TermPositions) current).NextPosition();
+                       }
+                       
+                       public virtual int GetPayloadLength()
+                       {
+                               return ((TermPositions) current).GetPayloadLength();
+                       }
+                       
+                       public virtual byte[] GetPayload(byte[] data, int offset)
+                       {
+                               return ((TermPositions) current).GetPayload(data, offset);
+                       }
+                       
+                       
+                       // TODO: Remove warning after API has been finalized
+                       public virtual bool IsPayloadAvailable()
+                       {
+                               return ((TermPositions) current).IsPayloadAvailable();
+                       }
+               }
+       }
+}
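
The two Multi* classes above are the heart of DirectoryReader's composite view: MultiTermEnum k-way-merges the sorted per-segment term streams through SegmentMergeQueue, summing DocFreq across segments that carry the same term, while MultiTermDocs maps each segment-local doc id into the composite space by adding the segment's entry from starts[]. The following standalone sketch shows both techniques with hypothetical names and plain strings in place of Term; it is an illustration of the approach, not code from this patch.

using System;
using System.Collections.Generic;

static class MultiSegmentSketch
{
    // K-way merge of already-sorted per-segment term streams, mimicking
    // MultiTermEnum over SegmentMergeQueue. Duplicate terms from different
    // segments are collapsed into one result, as Next() does while summing
    // the per-segment DocFreq values.
    static IEnumerable<string> MergeTerms(List<IEnumerator<string>> segments)
    {
        var queue = new SortedSet<(string Term, int Ord)>();
        for (int i = 0; i < segments.Count; i++)
            if (segments[i].MoveNext())
                queue.Add((segments[i].Current, i));

        string last = null;
        while (queue.Count > 0)
        {
            var top = queue.Min;
            queue.Remove(top);
            if (top.Term != last)
            {
                last = top.Term;
                yield return last;
            }
            if (segments[top.Ord].MoveNext())
                queue.Add((segments[top.Ord].Current, top.Ord));
        }
    }

    // Rebase a segment-local doc id into the composite reader's doc space,
    // the same arithmetic as MultiTermDocs.Doc(): base + current.Doc().
    static int GlobalDoc(int[] starts, int segment, int localDoc)
    {
        return starts[segment] + localDoc;
    }

    static void Main()
    {
        var segments = new List<IEnumerator<string>>
        {
            ((IEnumerable<string>) new[] { "apple", "cat" }).GetEnumerator(),
            ((IEnumerable<string>) new[] { "apple", "dog" }).GetEnumerator(),
        };
        Console.WriteLine(string.Join(" ", MergeTerms(segments)));  // apple cat dog
        Console.WriteLine(GlobalDoc(new[] { 0, 100 }, 1, 3));       // 103
    }
}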
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocConsumer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocConsumer.cs
new file mode 100644 (file)
index 0000000..19738ad
--- /dev/null
@@ -0,0 +1,31 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       abstract class DocConsumer
+       {
+               public abstract DocConsumerPerThread AddThread(DocumentsWriterThreadState perThread);
+               public abstract void  Flush(System.Collections.ICollection threads, SegmentWriteState state);
+               public abstract void  CloseDocStore(SegmentWriteState state);
+               public abstract void  Abort();
+               public abstract bool FreeRAM();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocConsumerPerThread.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocConsumerPerThread.cs
new file mode 100644 (file)
index 0000000..e04f649
--- /dev/null
@@ -0,0 +1,37 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       abstract class DocConsumerPerThread
+       {
+               
+               /// <summary>Process the document. If there is
+               /// something for this document to be done in docID order,
+               /// you should encapsulate that as a
+               /// DocumentsWriter.DocWriter and return it.
+               /// DocumentsWriter then calls Finish() on this object
+               /// when this document's turn comes. 
+               /// </summary>
+               public abstract DocumentsWriter.DocWriter ProcessDocument();
+               
+               public abstract void  Abort();
+       }
+}
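
The summary on ProcessDocument() above describes a two-phase contract: per-thread work may run in any order, but anything that must be applied in docID order is packaged as a DocumentsWriter.DocWriter whose Finish() the coordinator calls when the document's turn comes. A minimal sketch of that contract follows; the names (DeferredDocWriter, SketchConsumerPerThread) are hypothetical stand-ins for the real chain.

using System;
using System.Collections.Generic;

abstract class DeferredDocWriter
{
    public int DocId;
    public abstract void Finish();   // invoked in strict docID order
}

sealed class LoggingDocWriter : DeferredDocWriter
{
    public override void Finish()
    {
        Console.WriteLine("flushing doc " + DocId + " in order");
    }
}

sealed class SketchConsumerPerThread
{
    // Per-thread processing: free to run concurrently with other threads.
    public DeferredDocWriter ProcessDocument(int docId, string text)
    {
        int tokens = text.Split(' ').Length;   // the order-independent work
        Console.WriteLine("thread processed doc " + docId + " (" + tokens + " tokens)");
        return new LoggingDocWriter { DocId = docId };
    }
}

static class CoordinatorSketch
{
    static void Main()
    {
        var consumer = new SketchConsumerPerThread();
        var pending = new List<DeferredDocWriter>
        {
            consumer.ProcessDocument(0, "hello world"),
            consumer.ProcessDocument(1, "another doc"),
        };
        // DocumentsWriter's role: call Finish() in docID order.
        pending.Sort((a, b) => a.DocId.CompareTo(b.DocId));
        foreach (var writer in pending) writer.Finish();
    }
}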
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumer.cs
new file mode 100644 (file)
index 0000000..438c4ea
--- /dev/null
@@ -0,0 +1,55 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       abstract class DocFieldConsumer
+       {
+               
+               internal FieldInfos fieldInfos;
+               
+               /// <summary>Called when DocumentsWriter decides to create a new
+               /// segment 
+               /// </summary>
+               public abstract void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state);
+               
+               /// <summary>Called when DocumentsWriter decides to close the doc
+               /// stores 
+               /// </summary>
+               public abstract void  CloseDocStore(SegmentWriteState state);
+               
+               /// <summary>Called when an aborting exception is hit </summary>
+               public abstract void  Abort();
+               
+               /// <summary>Add a new thread </summary>
+               public abstract DocFieldConsumerPerThread AddThread(DocFieldProcessorPerThread docFieldProcessorPerThread);
+               
+               /// <summary>Called when DocumentsWriter is using too much RAM.
+               /// The consumer should free RAM, if possible, returning
+               /// true if any RAM was in fact freed. 
+               /// </summary>
+               public abstract bool FreeRAM();
+               
+               internal virtual void  SetFieldInfos(FieldInfos fieldInfos)
+               {
+                       this.fieldInfos = fieldInfos;
+               }
+       }
+}
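
DocFieldConsumer's comments above spell out the lifecycle every consumer in the chain follows: Flush when a new segment is created, CloseDocStore when the shared doc stores close, Abort on an aborting exception, FreeRAM under memory pressure. A no-op sketch of that call order, with a hypothetical type; the real signatures also carry SegmentWriteState and per-thread maps, which this sketch omits.

using System;

sealed class NoOpConsumerSketch
{
    public void Flush()         { Console.WriteLine("segment flushed"); }
    public void CloseDocStore() { Console.WriteLine("doc store closed"); }
    public void Abort()         { Console.WriteLine("partial state discarded"); }
    public bool FreeRAM()       { return false; }  // nothing buffered to free

    static void Main()
    {
        var consumer = new NoOpConsumerSketch();
        consumer.Flush();          // DocumentsWriter decided to create a segment
        consumer.CloseDocStore();  // then closes the doc stores
        if (!consumer.FreeRAM())   // asked when RAM use is too high
            Console.WriteLine("consumer freed nothing");
    }
}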
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumerPerField.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumerPerField.cs
new file mode 100644 (file)
index 0000000..1499747
--- /dev/null
@@ -0,0 +1,31 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Fieldable = Mono.Lucene.Net.Documents.Fieldable;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       abstract class DocFieldConsumerPerField
+       {
+               /// <summary>Processes all occurrences of a single field </summary>
+               public abstract void  ProcessFields(Fieldable[] fields, int count);
+               public abstract void  Abort();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumerPerThread.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumerPerThread.cs
new file mode 100644 (file)
index 0000000..01ee37a
--- /dev/null
@@ -0,0 +1,30 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       abstract class DocFieldConsumerPerThread
+       {
+               public abstract void  StartDocument();
+               public abstract DocumentsWriter.DocWriter FinishDocument();
+               public abstract DocFieldConsumerPerField AddField(FieldInfo fi);
+               public abstract void  Abort();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumers.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumers.cs
new file mode 100644 (file)
index 0000000..d5bf48f
--- /dev/null
@@ -0,0 +1,225 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using ArrayUtil = Mono.Lucene.Net.Util.ArrayUtil;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>This is just a "splitter" class: it lets you wrap two
+       /// DocFieldConsumer instances as a single consumer. 
+       /// </summary>
+       
+       sealed class DocFieldConsumers:DocFieldConsumer
+       {
+               private void  InitBlock()
+               {
+                       docFreeList = new PerDoc[1];
+               }
+               internal DocFieldConsumer one;
+               internal DocFieldConsumer two;
+               
+               public DocFieldConsumers(DocFieldConsumer one, DocFieldConsumer two)
+               {
+                       InitBlock();
+                       this.one = one;
+                       this.two = two;
+               }
+               
+               internal override void  SetFieldInfos(FieldInfos fieldInfos)
+               {
+                       base.SetFieldInfos(fieldInfos);
+                       one.SetFieldInfos(fieldInfos);
+                       two.SetFieldInfos(fieldInfos);
+               }
+               
+               public override void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
+               {
+                       
+                       System.Collections.IDictionary oneThreadsAndFields = new System.Collections.Hashtable();
+                       System.Collections.IDictionary twoThreadsAndFields = new System.Collections.Hashtable();
+                       
+                       System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               
+                               System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
+                               
+                               DocFieldConsumersPerThread perThread = (DocFieldConsumersPerThread) entry.Key;
+                               
+                               System.Collections.ICollection fields = (System.Collections.ICollection) entry.Value;
+                               
+                               System.Collections.IEnumerator fieldsIt = fields.GetEnumerator();
+                               System.Collections.Hashtable oneFields = new System.Collections.Hashtable();
+                               System.Collections.Hashtable twoFields = new System.Collections.Hashtable();
+                               while (fieldsIt.MoveNext())
+                               {
+                                       DocFieldConsumersPerField perField = (DocFieldConsumersPerField) fieldsIt.Current;
+                                       SupportClass.CollectionsHelper.AddIfNotContains(oneFields, perField.one);
+                                       SupportClass.CollectionsHelper.AddIfNotContains(twoFields, perField.two);
+                               }
+                               
+                               oneThreadsAndFields[perThread.one] = oneFields;
+                               twoThreadsAndFields[perThread.two] = twoFields;
+                       }
+                       
+                       
+                       one.Flush(oneThreadsAndFields, state);
+                       two.Flush(twoThreadsAndFields, state);
+               }
+               
+               public override void  CloseDocStore(SegmentWriteState state)
+               {
+                       try
+                       {
+                               one.CloseDocStore(state);
+                       }
+                       finally
+                       {
+                               two.CloseDocStore(state);
+                       }
+               }
+               
+               public override void  Abort()
+               {
+                       try
+                       {
+                               one.Abort();
+                       }
+                       finally
+                       {
+                               two.Abort();
+                       }
+               }
+               
+               public override bool FreeRAM()
+               {
+                       bool any = one.FreeRAM();
+                       any |= two.FreeRAM();
+                       return any;
+               }
+               
+               public override DocFieldConsumerPerThread AddThread(DocFieldProcessorPerThread docFieldProcessorPerThread)
+               {
+                       return new DocFieldConsumersPerThread(docFieldProcessorPerThread, this, one.AddThread(docFieldProcessorPerThread), two.AddThread(docFieldProcessorPerThread));
+               }
+               
+               internal PerDoc[] docFreeList;
+               internal int freeCount;
+               internal int allocCount;
+               
+               internal PerDoc GetPerDoc()
+               {
+                       lock (this)
+                       {
+                               if (freeCount == 0)
+                               {
+                                       allocCount++;
+                                       if (allocCount > docFreeList.Length)
+                                       {
+                                               // Grow our free list up front to make sure we have
+                                               // enough space to recycle all outstanding PerDoc
+                                               // instances
+                                               System.Diagnostics.Debug.Assert(allocCount == 1 + docFreeList.Length);
+                                               docFreeList = new PerDoc[ArrayUtil.GetNextSize(allocCount)];
+                                       }
+                                       return new PerDoc(this);
+                               }
+                               else
+                                       return docFreeList[--freeCount];
+                       }
+               }
+               
+               internal void  FreePerDoc(PerDoc perDoc)
+               {
+                       lock (this)
+                       {
+                               System.Diagnostics.Debug.Assert(freeCount < docFreeList.Length);
+                               docFreeList[freeCount++] = perDoc;
+                       }
+               }
+               
+               internal class PerDoc:DocumentsWriter.DocWriter
+               {
+                       public PerDoc(DocFieldConsumers enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(DocFieldConsumers enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private DocFieldConsumers enclosingInstance;
+                       public DocFieldConsumers Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       
+                       internal DocumentsWriter.DocWriter one;
+                       internal DocumentsWriter.DocWriter two;
+                       
+                       public override long SizeInBytes()
+                       {
+                               return one.SizeInBytes() + two.SizeInBytes();
+                       }
+                       
+                       public override void  Finish()
+                       {
+                               try
+                               {
+                                       try
+                                       {
+                                               one.Finish();
+                                       }
+                                       finally
+                                       {
+                                               two.Finish();
+                                       }
+                               }
+                               finally
+                               {
+                                       Enclosing_Instance.FreePerDoc(this);
+                               }
+                       }
+                       
+                       public override void  Abort()
+                       {
+                               try
+                               {
+                                       try
+                                       {
+                                               one.Abort();
+                                       }
+                                       finally
+                                       {
+                                               two.Abort();
+                                       }
+                               }
+                               finally
+                               {
+                                       Enclosing_Instance.FreePerDoc(this);
+                               }
+                       }
+               }
+       }
+}
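
DocFieldConsumers above combines two ideas worth seeing in isolation: a "tee" that forwards every call to two child consumers, and a lock-protected free list (GetPerDoc/FreePerDoc) that recycles per-document objects instead of allocating one per document. Both in miniature below, with hypothetical types; this is a sketch of the pattern, not the patch's API.

using System;
using System.Collections.Generic;

interface ISink { void Process(string field); }

sealed class Tee : ISink
{
    private readonly ISink one, two;
    public Tee(ISink one, ISink two) { this.one = one; this.two = two; }
    public void Process(string field) { one.Process(field); two.Process(field); }
}

sealed class Pool<T> where T : class, new()
{
    private readonly Stack<T> free = new Stack<T>();
    public T Get()           { lock (free) return free.Count > 0 ? free.Pop() : new T(); }
    public void Release(T t) { lock (free) free.Push(t); }
}

static class TeeDemo
{
    sealed class Printer : ISink
    {
        public string Name;
        public void Process(string field) { Console.WriteLine(Name + " saw " + field); }
    }

    static void Main()
    {
        ISink both = new Tee(new Printer { Name = "one" }, new Printer { Name = "two" });
        both.Process("title");    // both children see every field

        var pool = new Pool<object>();
        object doc = pool.Get();  // fresh allocation the first time
        pool.Release(doc);
        Console.WriteLine(ReferenceEquals(doc, pool.Get()));  // True: recycled
    }
}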
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumersPerField.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumersPerField.cs
new file mode 100644 (file)
index 0000000..cf66628
--- /dev/null
@@ -0,0 +1,57 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Fieldable = Mono.Lucene.Net.Documents.Fieldable;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class DocFieldConsumersPerField:DocFieldConsumerPerField
+       {
+               
+               internal DocFieldConsumerPerField one;
+               internal DocFieldConsumerPerField two;
+               internal DocFieldConsumersPerThread perThread;
+               
+               public DocFieldConsumersPerField(DocFieldConsumersPerThread perThread, DocFieldConsumerPerField one, DocFieldConsumerPerField two)
+               {
+                       this.perThread = perThread;
+                       this.one = one;
+                       this.two = two;
+               }
+               
+               public override void  ProcessFields(Fieldable[] fields, int count)
+               {
+                       one.ProcessFields(fields, count);
+                       two.ProcessFields(fields, count);
+               }
+               
+               public override void  Abort()
+               {
+                       try
+                       {
+                               one.Abort();
+                       }
+                       finally
+                       {
+                               two.Abort();
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumersPerThread.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldConsumersPerThread.cs
new file mode 100644 (file)
index 0000000..973998b
--- /dev/null
@@ -0,0 +1,82 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class DocFieldConsumersPerThread:DocFieldConsumerPerThread
+       {
+               
+               internal DocFieldConsumerPerThread one;
+               internal DocFieldConsumerPerThread two;
+               internal DocFieldConsumers parent;
+               internal DocumentsWriter.DocState docState;
+               
+               public DocFieldConsumersPerThread(DocFieldProcessorPerThread docFieldProcessorPerThread, DocFieldConsumers parent, DocFieldConsumerPerThread one, DocFieldConsumerPerThread two)
+               {
+                       this.parent = parent;
+                       this.one = one;
+                       this.two = two;
+                       docState = docFieldProcessorPerThread.docState;
+               }
+               
+               public override void  StartDocument()
+               {
+                       one.StartDocument();
+                       two.StartDocument();
+               }
+               
+               public override void  Abort()
+               {
+                       try
+                       {
+                               one.Abort();
+                       }
+                       finally
+                       {
+                               two.Abort();
+                       }
+               }
+               
+               public override DocumentsWriter.DocWriter FinishDocument()
+               {
+                       DocumentsWriter.DocWriter oneDoc = one.FinishDocument();
+                       DocumentsWriter.DocWriter twoDoc = two.FinishDocument();
+                       if (oneDoc == null)
+                               return twoDoc;
+                       else if (twoDoc == null)
+                               return oneDoc;
+                       else
+                       {
+                               DocFieldConsumers.PerDoc both = parent.GetPerDoc();
+                               both.docID = docState.docID;
+                               System.Diagnostics.Debug.Assert(oneDoc.docID == docState.docID);
+                               System.Diagnostics.Debug.Assert(twoDoc.docID == docState.docID);
+                               both.one = oneDoc;
+                               both.two = twoDoc;
+                               return both;
+                       }
+               }
+               
+               public override DocFieldConsumerPerField AddField(FieldInfo fi)
+               {
+                       return new DocFieldConsumersPerField(this, one.AddField(fi), two.AddField(fi));
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldProcessor.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldProcessor.cs
new file mode 100644 (file)
index 0000000..ead3ff0
--- /dev/null
@@ -0,0 +1,91 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> This is a DocConsumer that gathers all fields under the
+       /// same name, and calls per-field consumers to process field
+       /// by field.  This class doesn't do any "real" work
+       /// of its own: it just forwards the fields to a
+       /// DocFieldConsumer.
+       /// </summary>
+       
+       sealed class DocFieldProcessor:DocConsumer
+       {
+               
+               internal DocumentsWriter docWriter;
+               internal FieldInfos fieldInfos = new FieldInfos();
+               internal DocFieldConsumer consumer;
+               internal StoredFieldsWriter fieldsWriter;
+               
+               public DocFieldProcessor(DocumentsWriter docWriter, DocFieldConsumer consumer)
+               {
+                       this.docWriter = docWriter;
+                       this.consumer = consumer;
+                       consumer.SetFieldInfos(fieldInfos);
+                       fieldsWriter = new StoredFieldsWriter(docWriter, fieldInfos);
+               }
+               
+               public override void  CloseDocStore(SegmentWriteState state)
+               {
+                       consumer.CloseDocStore(state);
+                       fieldsWriter.CloseDocStore(state);
+               }
+               
+               public override void  Flush(System.Collections.ICollection threads, SegmentWriteState state)
+               {
+                       
+                       System.Collections.IDictionary childThreadsAndFields = new System.Collections.Hashtable();
+                       System.Collections.IEnumerator it = threads.GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               DocFieldProcessorPerThread perThread = (DocFieldProcessorPerThread) ((System.Collections.DictionaryEntry) it.Current).Key;
+                               childThreadsAndFields[perThread.consumer] = perThread.Fields();
+                               perThread.TrimFields(state);
+                       }
+                       fieldsWriter.Flush(state);
+                       consumer.Flush(childThreadsAndFields, state);
+                       
+                       // Important to save after asking consumer to flush so
+                       // consumer can alter the FieldInfo* if necessary.  EG,
+                       // FreqProxTermsWriter does this with
+                       // FieldInfo.storePayload.
+                       System.String fileName = state.SegmentFileName(IndexFileNames.FIELD_INFOS_EXTENSION);
+                       fieldInfos.Write(state.directory, fileName);
+                       SupportClass.CollectionsHelper.AddIfNotContains(state.flushedFiles, fileName);
+               }
+               
+               public override void  Abort()
+               {
+                       fieldsWriter.Abort();
+                       consumer.Abort();
+               }
+               
+               public override bool FreeRAM()
+               {
+                       return consumer.FreeRAM();
+               }
+               
+               public override DocConsumerPerThread AddThread(DocumentsWriterThreadState threadState)
+               {
+                       return new DocFieldProcessorPerThread(threadState, this);
+               }
+       }
+}
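
The comment in Flush above ("Important to save after asking consumer to flush...") encodes an ordering constraint that is easy to violate: the consumer's flush can still change per-field metadata, so FieldInfos must be serialized strictly afterwards. A tiny sketch of why the order matters, with hypothetical names standing in for FieldInfos and the consumer:

using System;
using System.Collections.Generic;

static class FlushOrderSketch
{
    static void Main()
    {
        // Per-field flags, standing in for FieldInfos; storePayload is the
        // example the comment in DocFieldProcessor.Flush gives.
        var storePayload = new Dictionary<string, bool> { { "body", false } };

        // The consumer's flush may mutate field flags as a side effect,
        // which is why FieldInfos is written only afterwards.
        void ConsumerFlush() { storePayload["body"] = true; }

        ConsumerFlush();                                     // 1. flush the consumer
        Console.WriteLine("body=" + storePayload["body"]);   // 2. then persist: True
        // Persisting the metadata before the flush would write a stale False.
    }
}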
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldProcessorPerField.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldProcessorPerField.cs
new file mode 100644 (file)
index 0000000..70207a9
--- /dev/null
@@ -0,0 +1,50 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Fieldable = Mono.Lucene.Net.Documents.Fieldable;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> Holds all per thread, per field state.</summary>
+       
+       sealed class DocFieldProcessorPerField
+       {
+               
+               internal DocFieldConsumerPerField consumer;
+               internal FieldInfo fieldInfo;
+               
+               internal DocFieldProcessorPerField next;
+               internal int lastGen = -1;
+               
+               internal int fieldCount;
+               internal Fieldable[] fields = new Fieldable[1];
+               
+               public DocFieldProcessorPerField(DocFieldProcessorPerThread perThread, FieldInfo fieldInfo)
+               {
+                       this.consumer = perThread.consumer.AddField(fieldInfo);
+                       this.fieldInfo = fieldInfo;
+               }
+               
+               public void  Abort()
+               {
+                       consumer.Abort();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldProcessorPerThread.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocFieldProcessorPerThread.cs
new file mode 100644 (file)
index 0000000..d4e436f
--- /dev/null
@@ -0,0 +1,471 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Document = Mono.Lucene.Net.Documents.Document;
+using Fieldable = Mono.Lucene.Net.Documents.Fieldable;
+using ArrayUtil = Mono.Lucene.Net.Util.ArrayUtil;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> Gathers all Fieldables for a document under the same
+       /// name, updates FieldInfos, and calls per-field consumers
+       /// to process field by field.
+       /// 
+       /// Currently, only a single thread visits the fields,
+       /// sequentially, for processing.
+       /// </summary>
+       
+       sealed class DocFieldProcessorPerThread:DocConsumerPerThread
+       {
+               private void  InitBlock()
+               {
+                       docFreeList = new PerDoc[1];
+               }
+               
+               internal float docBoost;
+               internal int fieldGen;
+               internal DocFieldProcessor docFieldProcessor;
+               internal FieldInfos fieldInfos;
+               internal DocFieldConsumerPerThread consumer;
+               
+               // Holds all fields seen in current doc
+               internal DocFieldProcessorPerField[] fields = new DocFieldProcessorPerField[1];
+               internal int fieldCount;
+               
+               // Hash table for all fields ever seen
+               internal DocFieldProcessorPerField[] fieldHash = new DocFieldProcessorPerField[2];
+               internal int hashMask = 1;
+               internal int totalFieldCount;
+               
+               internal StoredFieldsWriterPerThread fieldsWriter;
+               
+               internal DocumentsWriter.DocState docState;
+               
+               public DocFieldProcessorPerThread(DocumentsWriterThreadState threadState, DocFieldProcessor docFieldProcessor)
+               {
+                       InitBlock();
+                       this.docState = threadState.docState;
+                       this.docFieldProcessor = docFieldProcessor;
+                       this.fieldInfos = docFieldProcessor.fieldInfos;
+                       this.consumer = docFieldProcessor.consumer.AddThread(this);
+                       fieldsWriter = docFieldProcessor.fieldsWriter.AddThread(docState);
+               }
+               
+               public override void  Abort()
+               {
+                       for (int i = 0; i < fieldHash.Length; i++)
+                       {
+                               DocFieldProcessorPerField field = fieldHash[i];
+                               while (field != null)
+                               {
+                                       DocFieldProcessorPerField next = field.next;
+                                       field.Abort();
+                                       field = next;
+                               }
+                       }
+                       fieldsWriter.Abort();
+                       consumer.Abort();
+               }
+               
+               public System.Collections.ICollection Fields()
+               {
+                       System.Collections.Hashtable fields = new System.Collections.Hashtable();
+                       for (int i = 0; i < fieldHash.Length; i++)
+                       {
+                               DocFieldProcessorPerField field = fieldHash[i];
+                               while (field != null)
+                               {
+                                       fields[field.consumer] = field.consumer;
+                                       field = field.next;
+                               }
+                       }
+                       System.Diagnostics.Debug.Assert(fields.Count == totalFieldCount);
+                       return fields;
+               }
+               
+               /// <summary>If there are fields we've seen but did not see again
+               /// in the last run, then free them up. 
+               /// </summary>
+               
+               internal void  TrimFields(SegmentWriteState state)
+               {
+                       
+                       for (int i = 0; i < fieldHash.Length; i++)
+                       {
+                               DocFieldProcessorPerField perField = fieldHash[i];
+                               DocFieldProcessorPerField lastPerField = null;
+                               
+                               while (perField != null)
+                               {
+                                       
+                                       if (perField.lastGen == -1)
+                                       {
+                                               
+                                               // This field was not seen since the previous
+                                               // flush, so, free up its resources now
+                                               
+                                               // Unhash
+                                               if (lastPerField == null)
+                                                       fieldHash[i] = perField.next;
+                                               else
+                                                       lastPerField.next = perField.next;
+                                               
+                                               if (state.docWriter.infoStream != null)
+                                                       state.docWriter.infoStream.WriteLine("  purge field=" + perField.fieldInfo.name);
+                                               
+                                               totalFieldCount--;
+                                       }
+                                       else
+                                       {
+                                               // Reset
+                                               perField.lastGen = -1;
+                                               lastPerField = perField;
+                                       }
+                                       
+                                       perField = perField.next;
+                               }
+                       }
+               }
+               
+               private void  Rehash()
+               {
+                       int newHashSize = (int) (fieldHash.Length * 2);
+                       System.Diagnostics.Debug.Assert(newHashSize > fieldHash.Length);
+                       
+                       DocFieldProcessorPerField[] newHashArray = new DocFieldProcessorPerField[newHashSize];
+                       
+                       // Rehash
+                       int newHashMask = newHashSize - 1;
+                       for (int j = 0; j < fieldHash.Length; j++)
+                       {
+                               DocFieldProcessorPerField fp0 = fieldHash[j];
+                               while (fp0 != null)
+                               {
+                                       int hashPos2 = fp0.fieldInfo.name.GetHashCode() & newHashMask;
+                                       DocFieldProcessorPerField nextFP0 = fp0.next;
+                                       fp0.next = newHashArray[hashPos2];
+                                       newHashArray[hashPos2] = fp0;
+                                       fp0 = nextFP0;
+                               }
+                       }
+                       
+                       fieldHash = newHashArray;
+                       hashMask = newHashMask;
+               }
+               
+               public override DocumentsWriter.DocWriter ProcessDocument()
+               {
+                       
+                       consumer.StartDocument();
+                       fieldsWriter.StartDocument();
+                       
+                       Document doc = docState.doc;
+                       
+                       System.Diagnostics.Debug.Assert(docFieldProcessor.docWriter.writer.TestPoint("DocumentsWriter.ThreadState.init start"));
+                       
+                       fieldCount = 0;
+                       
+                       int thisFieldGen = fieldGen++;
+                       
+                       System.Collections.IList docFields = doc.GetFields();
+                       int numDocFields = docFields.Count;
+                       
+                       // Absorb any new fields first seen in this document.
+                       // Also absorb any changes to fields we had already
+                       // seen before (e.g. suddenly turning on norms or
+                       // vectors, etc.):
+                       
+                       for (int i = 0; i < numDocFields; i++)
+                       {
+                               Fieldable field = (Fieldable) docFields[i];
+                               System.String fieldName = field.Name();
+                               
+                               // Make sure we have a PerField allocated
+                               int hashPos = fieldName.GetHashCode() & hashMask;
+                               DocFieldProcessorPerField fp = fieldHash[hashPos];
+                               while (fp != null && !fp.fieldInfo.name.Equals(fieldName))
+                                       fp = fp.next;
+                               
+                               if (fp == null)
+                               {
+                                       
+                                       // TODO FI: we need to genericize the "flags" that a
+                                       // field holds, and, how these flags are merged; it
+                                       // needs to be more "pluggable" such that if I want
+                                       // to have a new "thing" my Fields can do, I can
+                                       // easily add it
+                                       FieldInfo fi = fieldInfos.Add(fieldName, field.IsIndexed(), field.IsTermVectorStored(), field.IsStorePositionWithTermVector(), field.IsStoreOffsetWithTermVector(), field.GetOmitNorms(), false, field.GetOmitTf());
+                                       
+                                       fp = new DocFieldProcessorPerField(this, fi);
+                                       fp.next = fieldHash[hashPos];
+                                       fieldHash[hashPos] = fp;
+                                       totalFieldCount++;
+                                       
+                                       if (totalFieldCount >= fieldHash.Length / 2)
+                                               Rehash();
+                               }
+                               else
+                                       fp.fieldInfo.Update(field.IsIndexed(), field.IsTermVectorStored(), field.IsStorePositionWithTermVector(), field.IsStoreOffsetWithTermVector(), field.GetOmitNorms(), false, field.GetOmitTf());
+                               
+                               if (thisFieldGen != fp.lastGen)
+                               {
+                                       
+                                       // First time we're seeing this field for this doc
+                                       fp.fieldCount = 0;
+                                       
+                                       if (fieldCount == fields.Length)
+                                       {
+                                               int newSize = fields.Length * 2;
+                                               DocFieldProcessorPerField[] newArray = new DocFieldProcessorPerField[newSize];
+                                               Array.Copy(fields, 0, newArray, 0, fieldCount);
+                                               fields = newArray;
+                                       }
+                                       
+                                       fields[fieldCount++] = fp;
+                                       fp.lastGen = thisFieldGen;
+                               }
+                               
+                               if (fp.fieldCount == fp.fields.Length)
+                               {
+                                       Fieldable[] newArray = new Fieldable[fp.fields.Length * 2];
+                                       Array.Copy(fp.fields, 0, newArray, 0, fp.fieldCount);
+                                       fp.fields = newArray;
+                               }
+                               
+                               fp.fields[fp.fieldCount++] = field;
+                               if (field.IsStored())
+                               {
+                                       fieldsWriter.AddField(field, fp.fieldInfo);
+                               }
+                       }
+                       
+                       // If we are writing vectors then we must visit
+                       // fields in sorted order so they are written in
+                       // sorted order.  TODO: we actually only need to
+                       // sort the subset of fields that have vectors
+                       // enabled; we could save [small amount of] CPU
+                       // here.
+                       QuickSort(fields, 0, fieldCount - 1);
+                       
+                       for (int i = 0; i < fieldCount; i++)
+                               fields[i].consumer.ProcessFields(fields[i].fields, fields[i].fieldCount);
+
+            if (docState.maxTermPrefix != null && docState.infoStream != null)
+            {
+                docState.infoStream.WriteLine("WARNING: document contains at least one immense term (longer than the max length " + DocumentsWriter.MAX_TERM_LENGTH + "), all of which were skipped.  Please correct the analyzer to not produce such terms.  The prefix of the first immense term is: '" + docState.maxTermPrefix + "...'");
+                docState.maxTermPrefix = null;
+            }
+                       
+                       DocumentsWriter.DocWriter one = fieldsWriter.FinishDocument();
+                       DocumentsWriter.DocWriter two = consumer.FinishDocument();
+                       if (one == null)
+                       {
+                               return two;
+                       }
+                       else if (two == null)
+                       {
+                               return one;
+                       }
+                       else
+                       {
+                               PerDoc both = GetPerDoc();
+                               both.docID = docState.docID;
+                               System.Diagnostics.Debug.Assert(one.docID == docState.docID);
+                               System.Diagnostics.Debug.Assert(two.docID == docState.docID);
+                               both.one = one;
+                               both.two = two;
+                               return both;
+                       }
+               }
+               
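+               /// <summary>In-place median-of-three quicksort over the
+               /// PerField array, ordered by field name, so that term
+               /// vectors are written in sorted field order.</summary>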
+               internal void  QuickSort(DocFieldProcessorPerField[] array, int lo, int hi)
+               {
+                       if (lo >= hi)
+                               return;
+                       else if (hi == 1 + lo)
+                       {
+                               if (String.CompareOrdinal(array[lo].fieldInfo.name, array[hi].fieldInfo.name) > 0)
+                               {
+                                       DocFieldProcessorPerField tmp = array[lo];
+                                       array[lo] = array[hi];
+                                       array[hi] = tmp;
+                               }
+                               return;
+                       }
+                       
+                       int mid = SupportClass.Number.URShift((lo + hi), 1);
+                       
+                       if (String.CompareOrdinal(array[lo].fieldInfo.name, array[mid].fieldInfo.name) > 0)
+                       {
+                               DocFieldProcessorPerField tmp = array[lo];
+                               array[lo] = array[mid];
+                               array[mid] = tmp;
+                       }
+                       
+                       if (String.CompareOrdinal(array[mid].fieldInfo.name, array[hi].fieldInfo.name) > 0)
+                       {
+                               DocFieldProcessorPerField tmp = array[mid];
+                               array[mid] = array[hi];
+                               array[hi] = tmp;
+                               
+                               if (String.CompareOrdinal(array[lo].fieldInfo.name, array[mid].fieldInfo.name) > 0)
+                               {
+                                       DocFieldProcessorPerField tmp2 = array[lo];
+                                       array[lo] = array[mid];
+                                       array[mid] = tmp2;
+                               }
+                       }
+                       
+                       int left = lo + 1;
+                       int right = hi - 1;
+                       
+                       if (left >= right)
+                               return;
+                       
+                       DocFieldProcessorPerField partition = array[mid];
+                       
+                       for (;;)
+                       {
+                               while (String.CompareOrdinal(array[right].fieldInfo.name, partition.fieldInfo.name) > 0)
+                                       --right;
+                               
+                               while (left < right && String.CompareOrdinal(array[left].fieldInfo.name, partition.fieldInfo.name) <= 0)
+                                       ++left;
+                               
+                               if (left < right)
+                               {
+                                       DocFieldProcessorPerField tmp = array[left];
+                                       array[left] = array[right];
+                                       array[right] = tmp;
+                                       --right;
+                               }
+                               else
+                               {
+                                       break;
+                               }
+                       }
+                       
+                       QuickSort(array, lo, left);
+                       QuickSort(array, left + 1, hi);
+               }
+               
+               internal PerDoc[] docFreeList;
+               internal int freeCount;
+               internal int allocCount;
+               
+               internal PerDoc GetPerDoc()
+               {
+                       lock (this)
+                       {
+                               if (freeCount == 0)
+                               {
+                                       allocCount++;
+                                       if (allocCount > docFreeList.Length)
+                                       {
+                                               // Grow our free list up front to make sure we have
+                                               // enough space to recycle all outstanding PerDoc
+                                               // instances
+                                               System.Diagnostics.Debug.Assert(allocCount == 1 + docFreeList.Length);
+                                               docFreeList = new PerDoc[ArrayUtil.GetNextSize(allocCount)];
+                                       }
+                                       return new PerDoc(this);
+                               }
+                               else
+                                       return docFreeList[--freeCount];
+                       }
+               }
+               
+               internal void  FreePerDoc(PerDoc perDoc)
+               {
+                       lock (this)
+                       {
+                               System.Diagnostics.Debug.Assert(freeCount < docFreeList.Length);
+                               docFreeList[freeCount++] = perDoc;
+                       }
+               }
+               
+               internal class PerDoc:DocumentsWriter.DocWriter
+               {
+                       public PerDoc(DocFieldProcessorPerThread enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(DocFieldProcessorPerThread enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private DocFieldProcessorPerThread enclosingInstance;
+                       public DocFieldProcessorPerThread Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       
+                       internal DocumentsWriter.DocWriter one;
+                       internal DocumentsWriter.DocWriter two;
+                       
+                       public override long SizeInBytes()
+                       {
+                               return one.SizeInBytes() + two.SizeInBytes();
+                       }
+                       
+                       public override void  Finish()
+                       {
+                               try
+                               {
+                                       try
+                                       {
+                                               one.Finish();
+                                       }
+                                       finally
+                                       {
+                                               two.Finish();
+                                       }
+                               }
+                               finally
+                               {
+                                       Enclosing_Instance.FreePerDoc(this);
+                               }
+                       }
+                       
+                       public override void  Abort()
+                       {
+                               try
+                               {
+                                       try
+                                       {
+                                               one.Abort();
+                                       }
+                                       finally
+                                       {
+                                               two.Abort();
+                                       }
+                               }
+                               finally
+                               {
+                                       Enclosing_Instance.FreePerDoc(this);
+                               }
+                       }
+               }
+       }
+}
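The fieldHash/hashMask/Rehash machinery above is a hand-rolled chained hash table with a power-of-two length: the bucket index is name.GetHashCode() & hashMask, and the table doubles once it reaches 50% load. A minimal standalone sketch of the same scheme (all names here are illustrative, not code from this commit):

// Minimal sketch of the chained, power-of-two hash table that backs the
// per-field lookup above: bucket index is hash & mask, and the table
// doubles once it reaches half full. All names are illustrative.
class ChainedTable
{
    class Node
    {
        public string Key;
        public Node Next;
    }

    Node[] buckets = new Node[2]; // length must stay a power of two
    int mask = 1;                 // always buckets.Length - 1
    int count;

    public void Add(string key)
    {
        int pos = key.GetHashCode() & mask;
        for (Node n = buckets[pos]; n != null; n = n.Next)
            if (n.Key == key)
                return; // already present

        buckets[pos] = new Node { Key = key, Next = buckets[pos] };

        // Same growth policy as the code above: rehash at 50% load.
        if (++count >= buckets.Length / 2)
            Rehash();
    }

    void Rehash()
    {
        Node[] newBuckets = new Node[buckets.Length * 2];
        int newMask = newBuckets.Length - 1;

        // Re-thread every chain into the doubled table, as Rehash() does.
        foreach (Node head in buckets)
        {
            Node n = head;
            while (n != null)
            {
                Node next = n.Next;
                int pos = n.Key.GetHashCode() & newMask;
                n.Next = newBuckets[pos];
                newBuckets[pos] = n;
                n = next;
            }
        }

        buckets = newBuckets;
        mask = newMask;
    }
}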
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocInverter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocInverter.cs
new file mode 100644 (file)
index 0000000..f36e68a
--- /dev/null
@@ -0,0 +1,105 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using AttributeSource = Mono.Lucene.Net.Util.AttributeSource;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>This is a DocFieldConsumer that inverts each field,
+       /// separately, from a Document, and accepts an
+       /// InvertedDocConsumer to process those terms. 
+       /// </summary>
+       
+       sealed class DocInverter:DocFieldConsumer
+       {
+               
+               internal InvertedDocConsumer consumer;
+               internal InvertedDocEndConsumer endConsumer;
+               
+               public DocInverter(InvertedDocConsumer consumer, InvertedDocEndConsumer endConsumer)
+               {
+                       this.consumer = consumer;
+                       this.endConsumer = endConsumer;
+               }
+               
+               internal override void  SetFieldInfos(FieldInfos fieldInfos)
+               {
+                       base.SetFieldInfos(fieldInfos);
+                       consumer.SetFieldInfos(fieldInfos);
+                       endConsumer.SetFieldInfos(fieldInfos);
+               }
+               
+               public override void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
+               {
+                       
+                       System.Collections.IDictionary childThreadsAndFields = new System.Collections.Hashtable();
+                       System.Collections.IDictionary endChildThreadsAndFields = new System.Collections.Hashtable();
+                       
+                       System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               
+                               System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
+                               
+                               DocInverterPerThread perThread = (DocInverterPerThread) entry.Key;
+                               
+                               System.Collections.ICollection fields = (System.Collections.ICollection) entry.Value;
+                               
+                               System.Collections.IEnumerator fieldsIt = fields.GetEnumerator();
+                               System.Collections.Hashtable childFields = new System.Collections.Hashtable();
+                               System.Collections.Hashtable endChildFields = new System.Collections.Hashtable();
+                               while (fieldsIt.MoveNext())
+                               {
+                                       DocInverterPerField perField = (DocInverterPerField) ((System.Collections.DictionaryEntry) fieldsIt.Current).Key;
+                                       childFields[perField.consumer] = perField.consumer;
+                                       endChildFields[perField.endConsumer] = perField.endConsumer;
+                               }
+                               
+                               childThreadsAndFields[perThread.consumer] = childFields;
+                               endChildThreadsAndFields[perThread.endConsumer] = endChildFields;
+                       }
+                       
+                       consumer.Flush(childThreadsAndFields, state);
+                       endConsumer.Flush(endChildThreadsAndFields, state);
+               }
+               
+               public override void  CloseDocStore(SegmentWriteState state)
+               {
+                       consumer.CloseDocStore(state);
+                       endConsumer.CloseDocStore(state);
+               }
+               
+               public override void  Abort()
+               {
+                       consumer.Abort();
+                       endConsumer.Abort();
+               }
+               
+               public override bool FreeRAM()
+               {
+                       return consumer.FreeRAM();
+               }
+               
+               public override DocFieldConsumerPerThread AddThread(DocFieldProcessorPerThread docFieldProcessorPerThread)
+               {
+                       return new DocInverterPerThread(docFieldProcessorPerThread, this);
+               }
+       }
+}
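Flush() above fans the single threadsAndFields map out into two child maps keyed by the downstream consumers, using non-generic Hashtables and DictionaryEntry casts. For readers, the same remapping expressed with generics; the type parameters are stand-ins for the per-thread and per-field consumer classes, not the port's actual API:

using System;
using System.Collections.Generic;

// Sketch of the fan-out DocInverter.Flush performs, with generic
// collections. Names and type parameters are illustrative.
static class FanOut
{
    public static Dictionary<TC, HashSet<FC>> Remap<TP, FP, TC, FC>(
        IDictionary<TP, ICollection<FP>> threadsAndFields,
        Func<TP, TC> threadChild,
        Func<FP, FC> fieldChild)
    {
        var result = new Dictionary<TC, HashSet<FC>>();
        foreach (var entry in threadsAndFields)
        {
            // One de-duplicated child set per thread; HashSet plays the
            // role the Hashtable keys play in the original.
            var children = new HashSet<FC>();
            foreach (var field in entry.Value)
                children.Add(fieldChild(field));
            result[threadChild(entry.Key)] = children;
        }
        return result;
    }
}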
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocInverterPerField.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocInverterPerField.cs
new file mode 100644 (file)
index 0000000..916aeec
--- /dev/null
@@ -0,0 +1,240 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TokenStream = Mono.Lucene.Net.Analysis.TokenStream;
+using OffsetAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.OffsetAttribute;
+using PositionIncrementAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.PositionIncrementAttribute;
+using Fieldable = Mono.Lucene.Net.Documents.Fieldable;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> Holds state for inverting all occurrences of a single
+       /// field in the document.  This class doesn't do anything
+       /// itself; instead, it forwards the tokens produced by
+       /// analysis to its own consumer
+       /// (InvertedDocConsumerPerField).  It also interacts with an
+       /// endConsumer (InvertedDocEndConsumerPerField).
+       /// </summary>
+       
+       sealed class DocInverterPerField:DocFieldConsumerPerField
+       {
+               
+               private DocInverterPerThread perThread;
+               private FieldInfo fieldInfo;
+               internal InvertedDocConsumerPerField consumer;
+               internal InvertedDocEndConsumerPerField endConsumer;
+               internal DocumentsWriter.DocState docState;
+               internal FieldInvertState fieldState;
+               
+               public DocInverterPerField(DocInverterPerThread perThread, FieldInfo fieldInfo)
+               {
+                       this.perThread = perThread;
+                       this.fieldInfo = fieldInfo;
+                       docState = perThread.docState;
+                       fieldState = perThread.fieldState;
+                       this.consumer = perThread.consumer.AddField(this, fieldInfo);
+                       this.endConsumer = perThread.endConsumer.AddField(this, fieldInfo);
+               }
+               
+               public override void  Abort()
+               {
+                       consumer.Abort();
+                       endConsumer.Abort();
+               }
+               
+               public override void  ProcessFields(Fieldable[] fields, int count)
+               {
+                       
+                       fieldState.Reset(docState.doc.GetBoost());
+                       
+                       int maxFieldLength = docState.maxFieldLength;
+                       
+                       bool doInvert = consumer.Start(fields, count);
+                       
+                       for (int i = 0; i < count; i++)
+                       {
+                               
+                               Fieldable field = fields[i];
+                               
+                               // TODO FI: this should be "genericized" to querying
+                               // consumer if it wants to see this particular field
+                               // tokenized.
+                               if (field.IsIndexed() && doInvert)
+                               {
+                                       
+                                       bool anyToken;
+                                       
+                                       if (fieldState.length > 0)
+                                               fieldState.position += docState.analyzer.GetPositionIncrementGap(fieldInfo.name);
+                                       
+                                       if (!field.IsTokenized())
+                                       {
+                                               // un-tokenized field
+                                               System.String stringValue = field.StringValue();
+                                               int valueLength = stringValue.Length;
+                                               perThread.singleTokenTokenStream.Reinit(stringValue, 0, valueLength);
+                                               fieldState.attributeSource = perThread.singleTokenTokenStream;
+                                               consumer.Start(field);
+                                               
+                                               bool success = false;
+                                               try
+                                               {
+                                                       consumer.Add();
+                                                       success = true;
+                                               }
+                                               finally
+                                               {
+                                                       if (!success)
+                                                               docState.docWriter.SetAborting();
+                                               }
+                                               fieldState.offset += valueLength;
+                                               fieldState.length++;
+                                               fieldState.position++;
+                                               anyToken = valueLength > 0;
+                                       }
+                                       else
+                                       {
+                                               // tokenized field
+                                               TokenStream stream;
+                                               TokenStream streamValue = field.TokenStreamValue();
+                                               
+                                               if (streamValue != null)
+                                                       stream = streamValue;
+                                               else
+                                               {
+                                                       // the field does not have a TokenStream,
+                                                       // so we have to obtain one from the analyzer
+                                                       System.IO.TextReader reader; // find or make Reader
+                                                       System.IO.TextReader readerValue = field.ReaderValue();
+                                                       
+                                                       if (readerValue != null)
+                                                               reader = readerValue;
+                                                       else
+                                                       {
+                                                               System.String stringValue = field.StringValue();
+                                                               if (stringValue == null)
+                                                                       throw new System.ArgumentException("field must have either TokenStream, String or Reader value");
+                                                               perThread.stringReader.Init(stringValue);
+                                                               reader = perThread.stringReader;
+                                                       }
+                                                       
+                                                       // Tokenize field and add to postingTable
+                                                       stream = docState.analyzer.ReusableTokenStream(fieldInfo.name, reader);
+                                               }
+                                               
+                                               // reset the TokenStream to the first token
+                                               stream.Reset();
+                                               
+                                               int startLength = fieldState.length;
+                                               
+                                               // deprecated
+                                               bool allowMinus1Position = docState.allowMinus1Position;
+                                               
+                                               try
+                                               {
+                                                       int offsetEnd = fieldState.offset - 1;
+                                                       
+                                                       bool hasMoreTokens = stream.IncrementToken();
+                                                       
+                                                       fieldState.attributeSource = stream;
+                                                       
+                                                       OffsetAttribute offsetAttribute = (OffsetAttribute) fieldState.attributeSource.AddAttribute(typeof(OffsetAttribute));
+                                                       PositionIncrementAttribute posIncrAttribute = (PositionIncrementAttribute) fieldState.attributeSource.AddAttribute(typeof(PositionIncrementAttribute));
+                                                       
+                                                       consumer.Start(field);
+                                                       
+                                                       for (;;)
+                                                       {
+                                                               
+                                                               // If we hit an exception in stream.next below
+                                                               // (which is fairly common, e.g. if the analyzer
+                                                               // chokes on a given document), then it's
+                                                               // non-aborting and (above) this one document
+                                                               // will be marked as deleted, but still
+                                                               // consume a docID
+                                                               
+                                                               if (!hasMoreTokens)
+                                                                       break;
+                                                               
+                                                               int posIncr = posIncrAttribute.GetPositionIncrement();
+                                                               fieldState.position += posIncr;
+                                                               if (allowMinus1Position || fieldState.position > 0)
+                                                               {
+                                                                       fieldState.position--;
+                                                               }
+                                                               
+                                                               if (posIncr == 0)
+                                                                       fieldState.numOverlap++;
+                                                               
+                                                               bool success = false;
+                                                               try
+                                                               {
+                                                                       // If we hit an exception in here, we abort
+                                                                       // all buffered documents since the last
+                                                                       // flush, on the likelihood that the
+                                                                       // internal state of the consumer is now
+                                                                       // corrupt and should not be flushed to a
+                                                                       // new segment:
+                                                                       consumer.Add();
+                                                                       success = true;
+                                                               }
+                                                               finally
+                                                               {
+                                                                       if (!success)
+                                                                               docState.docWriter.SetAborting();
+                                                               }
+                                                               fieldState.position++;
+                                                               offsetEnd = fieldState.offset + offsetAttribute.EndOffset();
+                                                               if (++fieldState.length >= maxFieldLength)
+                                                               {
+                                                                       if (docState.infoStream != null)
+                                                                               docState.infoStream.WriteLine("maxFieldLength " + maxFieldLength + " reached for field " + fieldInfo.name + ", ignoring following tokens");
+                                                                       break;
+                                                               }
+                                                               
+                                                               hasMoreTokens = stream.IncrementToken();
+                                                       }
+                                                       // trigger streams to perform end-of-stream operations
+                                                       stream.End();
+                                                       
+                                                       fieldState.offset += offsetAttribute.EndOffset();
+                                                       anyToken = fieldState.length > startLength;
+                                               }
+                                               finally
+                                               {
+                                                       stream.Close();
+                                               }
+                                       }
+                                       
+                                       if (anyToken)
+                                               fieldState.offset += docState.analyzer.GetOffsetGap(field);
+                                       fieldState.boost *= field.GetBoost();
+                               }
+                
+                // LUCENE-2387: don't hang onto the field, so GC can
+                // reclaim
+                fields[i] = null;
+                       }
+                       
+                       consumer.Finish();
+                       endConsumer.Finish();
+               }
+       }
+}
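The tokenized branch of ProcessFields accumulates absolute token positions from the per-token position increments, counting an increment of 0 as an overlapping token (numOverlap). A self-contained sketch of just that bookkeeping, with made-up input increments:

using System;

// Sketch of the position bookkeeping in ProcessFields: the absolute
// position is a running sum of per-token increments, and an increment
// of 0 marks a token that overlaps the previous one. Inputs are made up.
class PositionDemo
{
    static void Main()
    {
        // 0 = a synonym stacked on the previous token; 2 = a one-position
        // hole, e.g. where a stopword was removed.
        int[] increments = { 1, 1, 0, 2, 1 };

        int position = -1; // so the first increment of 1 yields position 0
        int numOverlap = 0;

        foreach (int posIncr in increments)
        {
            position += posIncr;
            if (posIncr == 0)
                numOverlap++;
            Console.WriteLine("token at position " + position);
        }
        // Prints positions 0, 1, 1, 3, 4; numOverlap == 1.
    }
}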
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocInverterPerThread.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocInverterPerThread.cs
new file mode 100644 (file)
index 0000000..1cc5a3f
--- /dev/null
@@ -0,0 +1,115 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TokenStream = Mono.Lucene.Net.Analysis.TokenStream;
+using OffsetAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.OffsetAttribute;
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>Per-thread version of DocInverter: inverts each field,
+       /// separately, from a Document, and hands the resulting tokens to an
+       /// InvertedDocConsumerPerThread to process. 
+       /// </summary>
+       
+       sealed class DocInverterPerThread:DocFieldConsumerPerThread
+       {
+               private void  InitBlock()
+               {
+                       singleTokenTokenStream = new SingleTokenTokenStream();
+               }
+               internal DocInverter docInverter;
+               internal InvertedDocConsumerPerThread consumer;
+               internal InvertedDocEndConsumerPerThread endConsumer;
+               //TODO: change to SingleTokenTokenStream after Token was removed
+               internal SingleTokenTokenStream singleTokenTokenStream;
+               
+               internal class SingleTokenTokenStream:TokenStream
+               {
+                       internal TermAttribute termAttribute;
+                       internal OffsetAttribute offsetAttribute;
+                       
+                       internal SingleTokenTokenStream()
+                       {
+                               termAttribute = (TermAttribute) AddAttribute(typeof(TermAttribute));
+                               offsetAttribute = (OffsetAttribute) AddAttribute(typeof(OffsetAttribute));
+                       }
+                       
+                       public void  Reinit(System.String stringValue, int startOffset, int endOffset)
+                       {
+                               termAttribute.SetTermBuffer(stringValue);
+                               offsetAttribute.SetOffset(startOffset, endOffset);
+                       }
+                       
+                       // This stream is consumed through its attributes and never iterated, so IncrementToken exists only to fail fast if it is called by mistake
+                       public override bool IncrementToken()
+                       {
+                               throw new System.NotSupportedException();
+                       }
+               }
+               
+               internal DocumentsWriter.DocState docState;
+               
+               internal FieldInvertState fieldState = new FieldInvertState();
+               
+               // Used to read a string value for a field
+               internal ReusableStringReader stringReader = new ReusableStringReader();
+               
+               public DocInverterPerThread(DocFieldProcessorPerThread docFieldProcessorPerThread, DocInverter docInverter)
+               {
+                       InitBlock();
+                       this.docInverter = docInverter;
+                       docState = docFieldProcessorPerThread.docState;
+                       consumer = docInverter.consumer.AddThread(this);
+                       endConsumer = docInverter.endConsumer.AddThread(this);
+               }
+               
+               public override void  StartDocument()
+               {
+                       consumer.StartDocument();
+                       endConsumer.StartDocument();
+               }
+               
+               public override DocumentsWriter.DocWriter FinishDocument()
+               {
+                       // TODO: allow endConsumer.finishDocument to also return
+                       // a DocWriter
+                       endConsumer.FinishDocument();
+                       return consumer.FinishDocument();
+               }
+               
+               public override void  Abort()
+               {
+                       try
+                       {
+                               consumer.Abort();
+                       }
+                       finally
+                       {
+                               endConsumer.Abort();
+                       }
+               }
+               
+               public override DocFieldConsumerPerField AddField(FieldInfo fi)
+               {
+                       return new DocInverterPerField(this, fi);
+               }
+       }
+}
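SingleTokenTokenStream.Reinit and ReusableStringReader exist so each thread allocates these helpers once and re-points them at new data for every document, avoiding per-document garbage. A sketch of that reuse pattern; the ReusableToken class below is hypothetical, not part of the port:

using System;

// Sketch of the reuse pattern behind SingleTokenTokenStream.Reinit and
// ReusableStringReader: allocate one helper per thread, then re-point it
// at new data per document instead of constructing a fresh object.
class ReusableToken
{
    string term;
    int start, end;

    public void Reinit(string term, int start, int end)
    {
        this.term = term; // overwrite the previous document's state
        this.start = start;
        this.end = end;
    }

    public override string ToString()
    {
        return term + " [" + start + "," + end + ")";
    }
}

class ReuseDemo
{
    static void Main()
    {
        ReusableToken token = new ReusableToken(); // allocated once
        foreach (string value in new[] { "alpha", "beta" })
        {
            token.Reinit(value, 0, value.Length);  // reused per document
            Console.WriteLine(token);
        }
    }
}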
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocumentsWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocumentsWriter.cs
new file mode 100644 (file)
index 0000000..ebc6ca9
--- /dev/null
@@ -0,0 +1,2145 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Analyzer = Mono.Lucene.Net.Analysis.Analyzer;
+using Document = Mono.Lucene.Net.Documents.Document;
+using AlreadyClosedException = Mono.Lucene.Net.Store.AlreadyClosedException;
+using Directory = Mono.Lucene.Net.Store.Directory;
+using ArrayUtil = Mono.Lucene.Net.Util.ArrayUtil;
+using Constants = Mono.Lucene.Net.Util.Constants;
+using IndexSearcher = Mono.Lucene.Net.Search.IndexSearcher;
+using Query = Mono.Lucene.Net.Search.Query;
+using Scorer = Mono.Lucene.Net.Search.Scorer;
+using Similarity = Mono.Lucene.Net.Search.Similarity;
+using Weight = Mono.Lucene.Net.Search.Weight;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> This class accepts multiple added documents and directly
+       /// writes a single segment file.  It does this more
+       /// efficiently than creating a single segment per document
+       /// (with DocumentWriter) and doing standard merges on those
+       /// segments.
+       /// 
+       /// Each added document is passed to the {@link DocConsumer},
+       /// which in turn processes the document and interacts with
+       /// other consumers in the indexing chain.  Certain
+       /// consumers, like {@link StoredFieldsWriter} and {@link
+       /// TermVectorsTermsWriter}, digest a document and
+       /// immediately write bytes to the "doc store" files (i.e.,
+       /// they do not consume RAM per document, except while they
+       /// are processing the document).
+       /// 
+       /// Other consumers, e.g. {@link FreqProxTermsWriter} and
+       /// {@link NormsWriter}, buffer bytes in RAM and flush only
+       /// when a new segment is produced.
+       /// Once we have used our allowed RAM buffer, or the number
+       /// of added docs is large enough (in the case we are
+       /// flushing by doc count instead of RAM usage), we create a
+       /// real segment and flush it to the Directory.
+       /// 
+       /// Threads:
+       /// 
+       /// Multiple threads are allowed into addDocument at once.
+       /// There is an initial synchronized call to getThreadState
+       /// which allocates a ThreadState for this thread.  The same
+       /// thread will get the same ThreadState over time (thread
+       /// affinity) so that if there are consistent patterns (for
+       /// example each thread is indexing a different content
+       /// source) then we make better use of RAM.  Then
+       /// processDocument is called on that ThreadState without
+       /// synchronization (most of the "heavy lifting" is in this
+       /// call).  Finally the synchronized "finishDocument" is
+       /// called to flush changes to the directory.
+       /// 
+       /// When flush is called by IndexWriter, or when we flush
+       /// internally because autoCommit=false, we forcefully idle all
+       /// threads and flush only once they are all idle.  This
+       /// means you can call flush with a given thread even while
+       /// other threads are actively adding/deleting documents.
+       /// 
+       /// 
+       /// Exceptions:
+       /// 
+       /// Because this class directly updates in-memory posting
+       /// lists, and flushes stored fields and term vectors
+       /// directly to files in the directory, there are certain
+       /// limited times when an exception can corrupt this state.
+       /// For example, a disk full while flushing stored fields
+       /// leaves this file in a corrupt state.  Or, an OOM
+       /// exception while appending to the in-memory posting lists
+       /// can corrupt that posting list.  We call such exceptions
+       /// "aborting exceptions".  In these cases we must call
+       /// abort() to discard all docs added since the last flush.
+       /// 
+       /// All other exceptions ("non-aborting exceptions") can
+       /// still partially update the index structures.  These
+       /// updates are consistent, but, they represent only a part
+       /// of the document seen up until the exception was hit.
+       /// When this happens, we immediately mark the document as
+       /// deleted so that the document is always atomically ("all
+       /// or none") added to the index.
+       /// </summary>
+       
+       public sealed class DocumentsWriter
+       {
+               internal class AnonymousClassIndexingChain:IndexingChain
+               {
+                       
+                       internal override DocConsumer GetChain(DocumentsWriter documentsWriter)
+                       {
+                               /*
+                               This is the current indexing chain:
+                               
+                               DocConsumer / DocConsumerPerThread
+                               --> code: DocFieldProcessor / DocFieldProcessorPerThread
+                               --> DocFieldConsumer / DocFieldConsumerPerThread / DocFieldConsumerPerField
+                               --> code: DocFieldConsumers / DocFieldConsumersPerThread / DocFieldConsumersPerField
+                               --> code: DocInverter / DocInverterPerThread / DocInverterPerField
+                               --> InvertedDocConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
+                               --> code: TermsHash / TermsHashPerThread / TermsHashPerField
+                               --> TermsHashConsumer / TermsHashConsumerPerThread / TermsHashConsumerPerField
+                               --> code: FreqProxTermsWriter / FreqProxTermsWriterPerThread / FreqProxTermsWriterPerField
+                               --> code: TermVectorsTermsWriter / TermVectorsTermsWriterPerThread / TermVectorsTermsWriterPerField
+                               --> InvertedDocEndConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
+                               --> code: NormsWriter / NormsWriterPerThread / NormsWriterPerField
+                               --> code: StoredFieldsWriter / StoredFieldsWriterPerThread / StoredFieldsWriterPerField
+                               */
+                               
+                               // Build up indexing chain:
+                               
+                               TermsHashConsumer termVectorsWriter = new TermVectorsTermsWriter(documentsWriter);
+                               TermsHashConsumer freqProxWriter = new FreqProxTermsWriter();
+                               
+                               InvertedDocConsumer termsHash = new TermsHash(documentsWriter, true, freqProxWriter, new TermsHash(documentsWriter, false, termVectorsWriter, null));
+                               NormsWriter normsWriter = new NormsWriter();
+                               DocInverter docInverter = new DocInverter(termsHash, normsWriter);
+                               return new DocFieldProcessor(documentsWriter, docInverter);
+                       }
+               }
+               private void  InitBlock()
+               {
+                       maxFieldLength = IndexWriter.DEFAULT_MAX_FIELD_LENGTH;
+                       maxBufferedDeleteTerms = IndexWriter.DEFAULT_MAX_BUFFERED_DELETE_TERMS;
+                       ramBufferSize = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB * 1024 * 1024);
+                       waitQueuePauseBytes = (long) (ramBufferSize * 0.1);
+                       waitQueueResumeBytes = (long) (ramBufferSize * 0.05);
+                       freeTrigger = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB * 1024 * 1024 * 1.05);
+                       freeLevel = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB * 1024 * 1024 * 0.95);
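+                       // With the default 16 MB RAM buffer these work out to roughly:
+                       // pause the wait queue at 1.6 MB of queued doc state, resume at
+                       // 0.8 MB, start freeing at 16.8 MB, and free down to 15.2 MB.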
+                       maxBufferedDocs = IndexWriter.DEFAULT_MAX_BUFFERED_DOCS;
+                       skipDocWriter = new SkipDocWriter();
+            byteBlockAllocator = new ByteBlockAllocator(this, DocumentsWriter.BYTE_BLOCK_SIZE);
+            perDocAllocator = new ByteBlockAllocator(this,DocumentsWriter.PER_DOC_BLOCK_SIZE);
+                       waitQueue = new WaitQueue(this);
+               }
+               
+               internal IndexWriter writer;
+               internal Directory directory;
+               
+               internal System.String segment; // Current segment we are working on
+               private System.String docStoreSegment; // Current doc-store segment we are writing
+               private int docStoreOffset; // Current starting doc-store offset of current segment
+               
+               private int nextDocID; // Next docID to be added
+               private int numDocsInRAM; // # docs buffered in RAM
+               internal int numDocsInStore; // # docs written to doc stores
+               
+               // Max # ThreadState instances; if there are more threads
+               // than this they share ThreadStates
+               private const int MAX_THREAD_STATE = 5;
+               private DocumentsWriterThreadState[] threadStates = new DocumentsWriterThreadState[0];
+               private System.Collections.Hashtable threadBindings = new System.Collections.Hashtable();
+               
+               private int pauseThreads; // Non-zero when we need all threads to
+               // pause (eg to flush)
+               internal bool flushPending; // True when a thread has decided to flush
+               internal bool bufferIsFull; // True when it's time to write segment
+               private bool aborting; // True if an abort is pending
+               
+               private DocFieldProcessor docFieldProcessor;
+               
+               internal System.IO.StreamWriter infoStream;
+               internal int maxFieldLength;
+               internal Similarity similarity;
+               
+               internal System.Collections.IList newFiles;
+               
+               internal class DocState
+               {
+                       internal DocumentsWriter docWriter;
+                       internal Analyzer analyzer;
+                       internal int maxFieldLength;
+                       internal System.IO.StreamWriter infoStream;
+                       internal Similarity similarity;
+                       internal int docID;
+                       internal Document doc;
+                       internal System.String maxTermPrefix;
+                       
+                       // deprecated
+            [Obsolete]
+                       internal bool allowMinus1Position;
+                       
+                       // Only called by asserts
+                       public bool TestPoint(System.String name)
+                       {
+                               return docWriter.writer.TestPoint(name);
+                       }
+
+            public void Clear()
+            {
+                // don't hold onto doc nor analyzer, in case it is
+                // largish:
+                doc = null;
+                analyzer = null;
+            }
+               }
+               
+               /// <summary>Consumer returns this on each doc.  This holds any
+               /// state that must be flushed "in docID order"; we gather
+               /// these and flush them in that order. 
+               /// </summary>
+               internal abstract class DocWriter
+               {
+                       internal DocWriter next;
+                       internal int docID;
+                       public abstract void  Finish();
+                       public abstract void  Abort();
+                       public abstract long SizeInBytes();
+                       
+                       internal void  SetNext(DocWriter next)
+                       {
+                               this.next = next;
+                       }
+               }
+               
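DocWriters must be flushed strictly in docID order; the WaitQueue defined later in this file parks out-of-order completions until every smaller docID has been written. A toy sketch of that idea (not the actual WaitQueue, which also tracks byte budgets):

using System;
using System.Collections.Generic;

// Toy sketch of in-docID-order flushing: completed docs may arrive in
// any order but are written only once every smaller docID has been
// written first. The real mechanism is DocumentsWriter's WaitQueue.
class InOrderFlusher
{
    readonly SortedDictionary<int, string> pending = new SortedDictionary<int, string>();
    int nextDocID; // the next id we are allowed to flush

    public void Finished(int docID, string payload)
    {
        pending[docID] = payload; // park out-of-order arrivals
        string p;
        while (pending.TryGetValue(nextDocID, out p))
        {
            Console.WriteLine("flush doc " + nextDocID + ": " + p);
            pending.Remove(nextDocID);
            nextDocID++;
        }
    }
}

class FlushDemo
{
    static void Main()
    {
        InOrderFlusher f = new InOrderFlusher();
        f.Finished(1, "b"); // parked: doc 0 is not done yet
        f.Finished(0, "a"); // flushes doc 0, then the parked doc 1
    }
}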
+        /**
+        * Create and return a new PerDocBuffer.
+        */
+        internal PerDocBuffer NewPerDocBuffer()
+        {
+            return new PerDocBuffer(this);
+        }
+
+        /**
+        * RAMFile buffer for DocWriters.
+        */
+        internal class PerDocBuffer : Mono.Lucene.Net.Store.RAMFile
+        {
+            DocumentsWriter enclosingInstance;
+            public PerDocBuffer(DocumentsWriter enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            /**
+            * Allocate bytes used from shared pool.
+            */
+            public override byte[] NewBuffer(int size)
+            {
+                System.Diagnostics.Debug.Assert(size == PER_DOC_BLOCK_SIZE);
+                return enclosingInstance.perDocAllocator.GetByteBlock(false);
+            }
+
+            /**
+            * Recycle the bytes used.
+            */
+            internal void Recycle()
+            {
+                lock (this)
+                {
+                    if (buffers.Count > 0)
+                    {
+                        SetLength(0);
+
+                        // Recycle the blocks
+                        enclosingInstance.perDocAllocator.RecycleByteBlocks(buffers);
+                        buffers.Clear();
+                        sizeInBytes = 0;
+
+                        System.Diagnostics.Debug.Assert(NumBuffers() == 0);
+                    }
+                }
+            }
+        }
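+        // Illustrative note (not in the original port): every buffer handed
+        // out by NewBuffer comes from the shared perDocAllocator in fixed
+        // PER_DOC_BLOCK_SIZE chunks, which is what lets Recycle() return the
+        // blocks to the pool for reuse by other documents.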
+
+               /// <summary> The IndexingChain must define the <see cref="GetChain(DocumentsWriter)"/> method
+               /// which returns the DocConsumer that the DocumentsWriter calls to process the
+               /// documents. 
+               /// </summary>
+               internal abstract class IndexingChain
+               {
+                       internal abstract DocConsumer GetChain(DocumentsWriter documentsWriter);
+               }
+               
+               internal static readonly IndexingChain DefaultIndexingChain;
+               
+               internal DocConsumer consumer;
+               
+               // Deletes done after the last flush; these are discarded
+               // on abort
+               private BufferedDeletes deletesInRAM = new BufferedDeletes(false);
+               
+               // Deletes done before the last flush; these are still
+               // kept on abort
+               private BufferedDeletes deletesFlushed = new BufferedDeletes(true);
+               
+               // The max number of delete terms that can be buffered before
+               // they must be flushed to disk.
+               private int maxBufferedDeleteTerms;
+               
+               // How much RAM we can use before flushing.  This is 0 if
+               // we are flushing by doc count instead.
+               private long ramBufferSize;
+               private long waitQueuePauseBytes;
+               private long waitQueueResumeBytes;
+               
+               // If we've allocated 5% over our RAM budget, we then
+               // free down to 95%
+               private long freeTrigger;
+               private long freeLevel;
+               
+               // Flush @ this number of docs.  If ramBufferSize is
+               // non-zero we will flush by RAM usage instead.
+               private int maxBufferedDocs;
+               
+               private int flushedDocCount; // How many docs already flushed to index
+               
+               internal void  UpdateFlushedDocCount(int n)
+               {
+                       lock (this)
+                       {
+                               flushedDocCount += n;
+                       }
+               }
+               internal int GetFlushedDocCount()
+               {
+                       lock (this)
+                       {
+                               return flushedDocCount;
+                       }
+               }
+               internal void  SetFlushedDocCount(int n)
+               {
+                       lock (this)
+                       {
+                               flushedDocCount = n;
+                       }
+               }
+               
+               private bool closed;
+               
+               internal DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain)
+               {
+                       InitBlock();
+                       this.directory = directory;
+                       this.writer = writer;
+                       this.similarity = writer.GetSimilarity();
+                       flushedDocCount = writer.MaxDoc();
+                       
+                       consumer = indexingChain.GetChain(this);
+                       if (consumer is DocFieldProcessor)
+                       {
+                               docFieldProcessor = (DocFieldProcessor) consumer;
+                       }
+               }
+               
+               /// <summary>Returns true if any of the fields in the current
+               /// buffered docs have omitTermFreqAndPositions==false 
+               /// </summary>
+               internal bool HasProx()
+               {
+                       return (docFieldProcessor != null) ? docFieldProcessor.fieldInfos.HasProx() : true;
+               }
+               
+               /// <summary>If non-null, various details of indexing are printed
+               /// here. 
+               /// </summary>
+               internal void  SetInfoStream(System.IO.StreamWriter infoStream)
+               {
+                       lock (this)
+                       {
+                               this.infoStream = infoStream;
+                               for (int i = 0; i < threadStates.Length; i++)
+                                       threadStates[i].docState.infoStream = infoStream;
+                       }
+               }
+               
+               internal void  SetMaxFieldLength(int maxFieldLength)
+               {
+                       lock (this)
+                       {
+                               this.maxFieldLength = maxFieldLength;
+                               for (int i = 0; i < threadStates.Length; i++)
+                                       threadStates[i].docState.maxFieldLength = maxFieldLength;
+                       }
+               }
+               
+               internal void  SetSimilarity(Similarity similarity)
+               {
+                       lock (this)
+                       {
+                               this.similarity = similarity;
+                               for (int i = 0; i < threadStates.Length; i++)
+                                       threadStates[i].docState.similarity = similarity;
+                       }
+               }
+               
+               internal void  SetAllowMinus1Position()
+               {
+                       lock (this)
+                       {
+                               for (int i = 0; i < threadStates.Length; i++)
+                                       threadStates[i].docState.allowMinus1Position = true;
+                       }
+               }
+               
+               /// <summary>Set how much RAM we can use before flushing. </summary>
+               internal void  SetRAMBufferSizeMB(double mb)
+               {
+                       lock (this)
+                       {
+                               if (mb == IndexWriter.DISABLE_AUTO_FLUSH)
+                               {
+                                       ramBufferSize = IndexWriter.DISABLE_AUTO_FLUSH;
+                                       waitQueuePauseBytes = 4 * 1024 * 1024;
+                                       waitQueueResumeBytes = 2 * 1024 * 1024;
+                               }
+                               else
+                               {
+                                       ramBufferSize = (long) (mb * 1024 * 1024);
+                                       waitQueuePauseBytes = (long) (ramBufferSize * 0.1);
+                                       waitQueueResumeBytes = (long) (ramBufferSize * 0.05);
+                                       freeTrigger = (long) (1.05 * ramBufferSize);
+                                       freeLevel = (long) (0.95 * ramBufferSize);
+                               }
+                       }
+               }
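+               // Worked example (illustrative, not part of the original code):
+               // with the default 16 MB buffer, SetRAMBufferSizeMB(16.0) yields
+               //   ramBufferSize        = 16 * 1024 * 1024  = 16,777,216 bytes
+               //   waitQueuePauseBytes  = 10% of the buffer =  1,677,721 bytes
+               //   waitQueueResumeBytes =  5% of the buffer =    838,860 bytes
+               //   freeTrigger          = 1.05 * buffer     = 17,616,076 bytes
+               //   freeLevel            = 0.95 * buffer     = 15,938,355 bytes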
+               
+               internal double GetRAMBufferSizeMB()
+               {
+                       lock (this)
+                       {
+                               if (ramBufferSize == IndexWriter.DISABLE_AUTO_FLUSH)
+                               {
+                                       return ramBufferSize;
+                               }
+                               else
+                               {
+                                       return ramBufferSize / 1024.0 / 1024.0;
+                               }
+                       }
+               }
+               
+               /// <summary>Set max buffered docs, which means we will flush by
+               /// doc count instead of by RAM usage. 
+               /// </summary>
+               internal void  SetMaxBufferedDocs(int count)
+               {
+                       maxBufferedDocs = count;
+               }
+               
+               internal int GetMaxBufferedDocs()
+               {
+                       return maxBufferedDocs;
+               }
+               
+               /// <summary>Get current segment name we are writing. </summary>
+               internal System.String GetSegment()
+               {
+                       return segment;
+               }
+               
+               /// <summary>Returns how many docs are currently buffered in RAM. </summary>
+               internal int GetNumDocsInRAM()
+               {
+                       return numDocsInRAM;
+               }
+               
+               /// <summary>Returns the current doc store segment we are writing
+               /// to.  This will be the same as segment when autoCommit
+               /// is true. 
+               /// </summary>
+               internal System.String GetDocStoreSegment()
+               {
+                       lock (this)
+                       {
+                               return docStoreSegment;
+                       }
+               }
+               
+               /// <summary>Returns the doc offset into the shared doc store for
+               /// the current buffered docs. 
+               /// </summary>
+               internal int GetDocStoreOffset()
+               {
+                       return docStoreOffset;
+               }
+               
+               /// <summary>Closes the currently open doc stores and returns the
+               /// doc store segment name.  This returns null if there are
+               /// no buffered documents. 
+               /// </summary>
+               internal System.String CloseDocStore()
+               {
+                       lock (this)
+                       {
+                               
+                               System.Diagnostics.Debug.Assert(AllThreadsIdle());
+                               
+                               if (infoStream != null)
+                                       Message("closeDocStore: " + openFiles.Count + " files to flush to segment " + docStoreSegment + " numDocs=" + numDocsInStore);
+                               
+                               bool success = false;
+                               
+                               try
+                               {
+                                       InitFlushState(true);
+                                       closedFiles.Clear();
+                                       
+                                       consumer.CloseDocStore(flushState);
+                                       System.Diagnostics.Debug.Assert(0 == openFiles.Count);
+                                       
+                                       System.String s = docStoreSegment;
+                                       docStoreSegment = null;
+                                       docStoreOffset = 0;
+                                       numDocsInStore = 0;
+                                       success = true;
+                                       return s;
+                               }
+                               finally
+                               {
+                                       if (!success)
+                                       {
+                                               Abort();
+                                       }
+                               }
+                       }
+               }
+               
+               private System.Collections.Generic.ICollection<string> abortedFiles; // List of files that were written before last abort()
+               
+               private SegmentWriteState flushState;
+
+        internal System.Collections.Generic.ICollection<string> AbortedFiles()
+               {
+                       return abortedFiles;
+               }
+               
+               internal void  Message(System.String message)
+               {
+                       if (infoStream != null)
+                               writer.Message("DW: " + message);
+               }
+
+        internal System.Collections.Generic.IList<string> openFiles = new System.Collections.Generic.List<string>();
+        internal System.Collections.Generic.IList<string> closedFiles = new System.Collections.Generic.List<string>();
+               
+               /* Returns Collection of files in use by this instance,
+               * including any flushed segments. */
+               internal System.Collections.Generic.IList<string> OpenFiles()
+               {
+                       lock (this)
+                       {
+                string[] tmp = new string[openFiles.Count];
+                openFiles.CopyTo(tmp, 0);
+                               return tmp;
+                       }
+               }
+               
+               internal System.Collections.Generic.IList<string> ClosedFiles()
+               {
+            lock (this)
+            {
+                string[] tmp = new string[closedFiles.Count];
+                closedFiles.CopyTo(tmp, 0);
+                return tmp;
+            }
+               }
+               
+               internal void  AddOpenFile(System.String name)
+               {
+                       lock (this)
+                       {
+                               System.Diagnostics.Debug.Assert(!openFiles.Contains(name));
+                               openFiles.Add(name);
+                       }
+               }
+               
+               internal void  RemoveOpenFile(System.String name)
+               {
+                       lock (this)
+                       {
+                               System.Diagnostics.Debug.Assert(openFiles.Contains(name));
+                               openFiles.Remove(name);
+                               closedFiles.Add(name);
+                       }
+               }
+               
+               internal void  SetAborting()
+               {
+                       lock (this)
+                       {
+                               aborting = true;
+                       }
+               }
+               
+               /// <summary>Called if we hit an exception at a bad time (when
+               /// updating the index files) and must discard all
+               /// currently buffered docs.  This resets our state,
+               /// discarding any docs added since last flush. 
+               /// </summary>
+               internal void  Abort()
+               {
+                       lock (this)
+                       {
+                               
+                               try
+                               {
+                                       if (infoStream != null)
+                                               Message("docWriter: now abort");
+                                       
+                                       // Forcefully remove waiting ThreadStates from line
+                                       waitQueue.Abort();
+                                       
+                                       // Wait for all other threads to finish with
+                                       // DocumentsWriter:
+                                       PauseAllThreads();
+                                       
+                                       try
+                                       {
+                                               
+                                               System.Diagnostics.Debug.Assert(0 == waitQueue.numWaiting);
+                                               
+                                               waitQueue.waitingBytes = 0;
+                                               
+                                               try
+                                               {
+                                                       abortedFiles = OpenFiles();
+                                               }
+                                               catch (System.Exception)
+                                               {
+                                                       abortedFiles = null;
+                                               }
+                                               
+                                               deletesInRAM.Clear();
+                        deletesFlushed.Clear();
+                                               openFiles.Clear();
+                                               
+                                               for (int i = 0; i < threadStates.Length; i++)
+                                                       try
+                                                       {
+                                                               threadStates[i].consumer.Abort();
+                                                       }
+                                                       catch (System.Exception)
+                                                       {
+                                                               // ignore: abort each thread's consumer on a best-effort basis
+                                                       }
+                                               
+                                               try
+                                               {
+                                                       consumer.Abort();
+                                               }
+                                               catch (System.Exception)
+                                               {
+                                                       // ignore: we are already aborting
+                                               }
+                                               
+                                               docStoreSegment = null;
+                                               numDocsInStore = 0;
+                                               docStoreOffset = 0;
+                                               
+                                               // Reset all postings data
+                                               DoAfterFlush();
+                                       }
+                                       finally
+                                       {
+                                               ResumeAllThreads();
+                                       }
+                               }
+                               finally
+                               {
+                                       aborting = false;
+                                       System.Threading.Monitor.PulseAll(this);
+                    if (infoStream != null)
+                    {
+                        Message("docWriter: done abort; abortedFiles=" + abortedFiles);
+                    }
+                               }
+                       }
+               }
+               
+               /// <summary>Reset after a flush </summary>
+               private void  DoAfterFlush()
+               {
+                       // All ThreadStates should be idle when we are called
+                       System.Diagnostics.Debug.Assert(AllThreadsIdle());
+                       threadBindings.Clear();
+                       waitQueue.Reset();
+                       segment = null;
+                       numDocsInRAM = 0;
+                       nextDocID = 0;
+                       bufferIsFull = false;
+                       flushPending = false;
+                       for (int i = 0; i < threadStates.Length; i++)
+                               threadStates[i].DoAfterFlush();
+                       numBytesUsed = 0;
+               }
+               
+               // Returns true if an abort is in progress
+               internal bool PauseAllThreads()
+               {
+                       lock (this)
+                       {
+                               pauseThreads++;
+                               while (!AllThreadsIdle())
+                               {
+                                       try
+                                       {
+                                               System.Threading.Monitor.Wait(this);
+                                       }
+                                       catch (System.Threading.ThreadInterruptedException ie)
+                                       {
+                                               // In 3.0 we will change this to throw
+                                               // InterruptedException instead
+                                               SupportClass.ThreadClass.Current().Interrupt();
+                                               throw new System.SystemException(ie.Message, ie);
+                                       }
+                               }
+                               
+                               return aborting;
+                       }
+               }
+               
+               internal void  ResumeAllThreads()
+               {
+                       lock (this)
+                       {
+                               pauseThreads--;
+                               System.Diagnostics.Debug.Assert(pauseThreads >= 0);
+                               if (0 == pauseThreads)
+                                       System.Threading.Monitor.PulseAll(this);
+                       }
+               }
+               
+               private bool AllThreadsIdle()
+               {
+                       lock (this)
+                       {
+                               for (int i = 0; i < threadStates.Length; i++)
+                                       if (!threadStates[i].isIdle)
+                                               return false;
+                               return true;
+                       }
+               }
+               
+               internal bool AnyChanges()
+               {
+                       lock (this)
+                       {
+                               return numDocsInRAM != 0 || deletesInRAM.numTerms != 0 || deletesInRAM.docIDs.Count != 0 || deletesInRAM.queries.Count != 0;
+                       }
+               }
+               
+               private void  InitFlushState(bool onlyDocStore)
+               {
+                       lock (this)
+                       {
+                               InitSegmentName(onlyDocStore);
+                               flushState = new SegmentWriteState(this, directory, segment, docStoreSegment, numDocsInRAM, numDocsInStore, writer.GetTermIndexInterval());
+                       }
+               }
+               
+               /// <summary>Flush all pending docs to a new segment </summary>
+               internal int Flush(bool closeDocStore)
+               {
+                       lock (this)
+                       {
+                               
+                               System.Diagnostics.Debug.Assert(AllThreadsIdle());
+                               
+                               System.Diagnostics.Debug.Assert(numDocsInRAM > 0);
+                               
+                               System.Diagnostics.Debug.Assert(nextDocID == numDocsInRAM);
+                               System.Diagnostics.Debug.Assert(waitQueue.numWaiting == 0);
+                               System.Diagnostics.Debug.Assert(waitQueue.waitingBytes == 0);
+                               
+                               InitFlushState(false);
+                               
+                               docStoreOffset = numDocsInStore;
+                               
+                               if (infoStream != null)
+                                       Message("flush postings as segment " + flushState.segmentName + " numDocs=" + numDocsInRAM);
+                               
+                               bool success = false;
+                               
+                               try
+                               {
+                                       
+                                       if (closeDocStore)
+                                       {
+                                               System.Diagnostics.Debug.Assert(flushState.docStoreSegmentName != null);
+                                               System.Diagnostics.Debug.Assert(flushState.docStoreSegmentName.Equals(flushState.segmentName));
+                                               CloseDocStore();
+                                               flushState.numDocsInStore = 0;
+                                       }
+                                       
+                                       System.Collections.Hashtable threads = new System.Collections.Hashtable();
+                                       for (int i = 0; i < threadStates.Length; i++)
+                                               threads[threadStates[i].consumer] = threadStates[i].consumer;
+                                       consumer.Flush(threads, flushState);
+                                       
+                                       if (infoStream != null)
+                                       {
+                        SegmentInfo si = new SegmentInfo(flushState.segmentName, flushState.numDocs, directory);
+                        long newSegmentSize = si.SizeInBytes();
+                        System.String message = System.String.Format(nf, "  oldRAMSize={0:d} newFlushedSize={1:d} docs/MB={2:f} new/old={3:f}%",
+                            new System.Object[] { numBytesUsed, newSegmentSize, (numDocsInRAM / (newSegmentSize / 1024.0 / 1024.0)), (100.0 * newSegmentSize / numBytesUsed) });
+                                               Message(message);
+                                       }
+                                       
+                                       flushedDocCount += flushState.numDocs;
+                                       
+                                       DoAfterFlush();
+                                       
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       if (!success)
+                                       {
+                                               Abort();
+                                       }
+                               }
+                               
+                               System.Diagnostics.Debug.Assert(waitQueue.waitingBytes == 0);
+                               
+                               return flushState.numDocs;
+                       }
+               }
+
+        internal System.Collections.ICollection GetFlushedFiles()
+        {
+            return flushState.flushedFiles;
+        }
+               
+               /// <summary>Build compound file for the segment we just flushed </summary>
+               internal void  CreateCompoundFile(System.String segment)
+               {
+                       
+                       CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
+                       System.Collections.IEnumerator it = flushState.flushedFiles.GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               cfsWriter.AddFile((System.String) ((System.Collections.DictionaryEntry) it.Current).Key);
+                       }
+                       
+                       // Perform the merge
+                       cfsWriter.Close();
+               }
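+               // For example (hypothetical names): after flushing segment "_0",
+               // flushedFiles typically holds files such as _0.fdt, _0.fdx,
+               // _0.frq, _0.prx, _0.tis and _0.tii, which this method packs
+               // into a single "_0.cfs" compound file.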
+               
+               /// <summary>Sets flushPending if it is not already set and returns
+               /// whether it was set. This is used by IndexWriter to
+               /// trigger a single flush even when multiple threads are
+               /// trying to do so. 
+               /// </summary>
+               internal bool SetFlushPending()
+               {
+                       lock (this)
+                       {
+                               if (flushPending)
+                                       return false;
+                               else
+                               {
+                                       flushPending = true;
+                                       return true;
+                               }
+                       }
+               }
+               
+               internal void  ClearFlushPending()
+               {
+                       lock (this)
+                       {
+                               flushPending = false;
+                       }
+               }
+               
+               internal void  PushDeletes()
+               {
+                       lock (this)
+                       {
+                               deletesFlushed.Update(deletesInRAM);
+                       }
+               }
+               
+               internal void  Close()
+               {
+                       lock (this)
+                       {
+                               closed = true;
+                               System.Threading.Monitor.PulseAll(this);
+                       }
+               }
+               
+               internal void  InitSegmentName(bool onlyDocStore)
+               {
+                       lock (this)
+                       {
+                               if (segment == null && (!onlyDocStore || docStoreSegment == null))
+                               {
+                                       segment = writer.NewSegmentName();
+                                       System.Diagnostics.Debug.Assert(numDocsInRAM == 0);
+                               }
+                               if (docStoreSegment == null)
+                               {
+                                       docStoreSegment = segment;
+                                       System.Diagnostics.Debug.Assert(numDocsInStore == 0);
+                               }
+                       }
+               }
+               
+               /// <summary>Returns a free (idle) ThreadState that may be used for
+               /// indexing this one document.  This call also pauses if a
+               /// flush is pending.  If delTerm is non-null then we
+               /// buffer this deleted term after the thread state has
+               /// been acquired. 
+               /// </summary>
+               internal DocumentsWriterThreadState GetThreadState(Document doc, Term delTerm)
+               {
+                       lock (this)
+                       {
+                               
+                               // First, find a thread state.  If this thread already
+                               // has affinity to a specific ThreadState, use that one
+                               // again.
+                               DocumentsWriterThreadState state = (DocumentsWriterThreadState) threadBindings[SupportClass.ThreadClass.Current()];
+                               if (state == null)
+                               {
+                                       
+                                       // First time this thread has called us since last
+                                       // flush.  Find the least loaded thread state:
+                                       DocumentsWriterThreadState minThreadState = null;
+                                       for (int i = 0; i < threadStates.Length; i++)
+                                       {
+                                               DocumentsWriterThreadState ts = threadStates[i];
+                                               if (minThreadState == null || ts.numThreads < minThreadState.numThreads)
+                                                       minThreadState = ts;
+                                       }
+                                       if (minThreadState != null && (minThreadState.numThreads == 0 || threadStates.Length >= MAX_THREAD_STATE))
+                                       {
+                                               state = minThreadState;
+                                               state.numThreads++;
+                                       }
+                                       else
+                                       {
+                                               // Just create a new "private" thread state
+                                               DocumentsWriterThreadState[] newArray = new DocumentsWriterThreadState[1 + threadStates.Length];
+                                               if (threadStates.Length > 0)
+                                                       Array.Copy(threadStates, 0, newArray, 0, threadStates.Length);
+                                               state = newArray[threadStates.Length] = new DocumentsWriterThreadState(this);
+                                               threadStates = newArray;
+                                       }
+                                       threadBindings[SupportClass.ThreadClass.Current()] = state;
+                               }
+                               
+                               // Next, wait until my thread state is idle (in case
+                               // it's shared with other threads) and for threads to
+                               // not be paused nor a flush pending:
+                               WaitReady(state);
+                               
+                               // Allocate segment name if this is the first doc since
+                               // last flush:
+                               InitSegmentName(false);
+                               
+                               state.isIdle = false;
+                               
+                               bool success = false;
+                               try
+                               {
+                                       state.docState.docID = nextDocID;
+                                       
+                                       System.Diagnostics.Debug.Assert(writer.TestPoint("DocumentsWriter.ThreadState.init start"));
+                                       
+                                       if (delTerm != null)
+                                       {
+                                               AddDeleteTerm(delTerm, state.docState.docID);
+                                               state.doFlushAfter = TimeToFlushDeletes();
+                                       }
+                                       
+                                       System.Diagnostics.Debug.Assert(writer.TestPoint("DocumentsWriter.ThreadState.init after delTerm"));
+                                       
+                                       nextDocID++;
+                                       numDocsInRAM++;
+                                       
+                                       // We must at this point commit to flushing to ensure we
+                                       // always get N docs when we flush by doc count, even if
+                                       // > 1 thread is adding documents:
+                                       if (!flushPending && maxBufferedDocs != IndexWriter.DISABLE_AUTO_FLUSH && numDocsInRAM >= maxBufferedDocs)
+                                       {
+                                               flushPending = true;
+                                               state.doFlushAfter = true;
+                                       }
+                                       
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       if (!success)
+                                       {
+                                               // Forcefully idle this ThreadState:
+                                               state.isIdle = true;
+                                               System.Threading.Monitor.PulseAll(this);
+                                               if (state.doFlushAfter)
+                                               {
+                                                       state.doFlushAfter = false;
+                                                       flushPending = false;
+                                               }
+                                       }
+                               }
+                               
+                               return state;
+                       }
+               }
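+               // Illustrative summary (not in the original port) of the policy
+               // above: a thread first reuses the ThreadState it is bound to;
+               // otherwise it shares the least-loaded state when that state is
+               // unused or the pool already holds MAX_THREAD_STATE states, and
+               // only then grows threadStates by one to get a private state.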
+               
+               /// <summary>Returns true if the caller (IndexWriter) should now
+               /// flush. 
+               /// </summary>
+               internal bool AddDocument(Document doc, Analyzer analyzer)
+               {
+                       return UpdateDocument(doc, analyzer, null);
+               }
+               
+               internal bool UpdateDocument(Term t, Document doc, Analyzer analyzer)
+               {
+                       return UpdateDocument(doc, analyzer, t);
+               }
+               
+               internal bool UpdateDocument(Document doc, Analyzer analyzer, Term delTerm)
+               {
+                       
+                       // This call is synchronized but fast
+                       DocumentsWriterThreadState state = GetThreadState(doc, delTerm);
+                       
+                       DocState docState = state.docState;
+                       docState.doc = doc;
+                       docState.analyzer = analyzer;
+
+            bool doReturnFalse = false; // {{Aroush-2.9}} C# cannot return from inside a finally clause, so flag it instead
+
+                       bool success = false;
+                       try
+                       {
+                               // This call is not synchronized and does all the
+                               // work
+                               DocWriter perDoc;
+                try
+                {
+                    perDoc = state.consumer.ProcessDocument();
+                }
+                finally
+                {
+                    docState.Clear();
+                }
+                               // This call is synchronized but fast
+                               FinishDocument(state, perDoc);
+                               success = true;
+                       }
+                       finally
+                       {
+                               if (!success)
+                               {
+                                       lock (this)
+                                       {
+                                               
+                                               if (aborting)
+                                               {
+                                                       state.isIdle = true;
+                                                       System.Threading.Monitor.PulseAll(this);
+                                                       Abort();
+                                               }
+                                               else
+                                               {
+                                                       skipDocWriter.docID = docState.docID;
+                                                       bool success2 = false;
+                                                       try
+                                                       {
+                                                               waitQueue.Add(skipDocWriter);
+                                                               success2 = true;
+                                                       }
+                                                       finally
+                                                       {
+                                                               if (!success2)
+                                                               {
+                                                                       state.isIdle = true;
+                                                                       System.Threading.Monitor.PulseAll(this);
+                                                                       Abort();
+                                                                       // return false; // {{Aroush-2.9}} this 'return false' is moved outside the finally block
+                                    doReturnFalse = true;
+                                                               }
+                                                       }
+
+                            if (!doReturnFalse)   // {{Aroush-2.9}} added because of the above 'return false' removal
+                            {
+                                                               state.isIdle = true;
+                                                               System.Threading.Monitor.PulseAll(this);
+                                                       
+                                                               // If this thread state had decided to flush, we
+                                                               // must clear it so another thread can flush
+                                                               if (state.doFlushAfter)
+                                                               {
+                                                                       state.doFlushAfter = false;
+                                                                       flushPending = false;
+                                                                       System.Threading.Monitor.PulseAll(this);
+                                                               }
+                                                               
+                                                               // Immediately mark this document as deleted
+                                                               // since likely it was partially added.  This
+                                                               // keeps indexing as "all or none" (atomic) when
+                                                               // adding a document:
+                                                               AddDeleteDocID(state.docState.docID);
+                            }
+                                               }
+                                       }
+                               }
+                       }
+
+            if (doReturnFalse)  // {{Aroush-2.9}} see comment above
+            {
+                return false;
+            }
+
+                       return state.doFlushAfter || TimeToFlushDeletes();
+               }
+               
+               // for testing
+               internal int GetNumBufferedDeleteTerms()
+               {
+                       lock (this)
+                       {
+                               return deletesInRAM.numTerms; 
+                       }
+               }
+               
+               // for testing
+               internal System.Collections.IDictionary GetBufferedDeleteTerms()
+               {
+                       lock (this)
+                       {
+                               return deletesInRAM.terms;
+                       }
+               }
+               
+               /// <summary>Called whenever a merge has completed and the merged segments had deletions </summary>
+               internal void  RemapDeletes(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount)
+               {
+                       lock (this)
+                       {
+                               // The merged segments had no deletes, so docIDs did not change and we have nothing to do
+                               if (docMaps == null)
+                                       return;
+                               MergeDocIDRemapper mapper = new MergeDocIDRemapper(infos, docMaps, delCounts, merge, mergeDocCount);
+                               deletesInRAM.Remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
+                               deletesFlushed.Remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
+                               flushedDocCount -= mapper.docShift;
+                       }
+               }
+               
+               private void  WaitReady(DocumentsWriterThreadState state)
+               {
+                       lock (this)
+                       {
+                               
+                               while (!closed && ((state != null && !state.isIdle) || pauseThreads != 0 || flushPending || aborting))
+                               {
+                                       try
+                                       {
+                                               System.Threading.Monitor.Wait(this);
+                                       }
+                                       catch (System.Threading.ThreadInterruptedException ie)
+                                       {
+                                               // In 3.0 we will change this to throw
+                                               // InterruptedException instead
+                                               SupportClass.ThreadClass.Current().Interrupt();
+                                               throw new System.SystemException(ie.Message, ie);
+                                       }
+                               }
+                               
+                               if (closed)
+                                       throw new AlreadyClosedException("this IndexWriter is closed");
+                       }
+               }
+               
+               internal bool BufferDeleteTerms(Term[] terms)
+               {
+                       lock (this)
+                       {
+                               WaitReady(null);
+                               for (int i = 0; i < terms.Length; i++)
+                                       AddDeleteTerm(terms[i], numDocsInRAM);
+                               return TimeToFlushDeletes();
+                       }
+               }
+               
+               internal bool BufferDeleteTerm(Term term)
+               {
+                       lock (this)
+                       {
+                               WaitReady(null);
+                               AddDeleteTerm(term, numDocsInRAM);
+                               return TimeToFlushDeletes();
+                       }
+               }
+               
+               internal bool BufferDeleteQueries(Query[] queries)
+               {
+                       lock (this)
+                       {
+                               WaitReady(null);
+                               for (int i = 0; i < queries.Length; i++)
+                                       AddDeleteQuery(queries[i], numDocsInRAM);
+                               return TimeToFlushDeletes();
+                       }
+               }
+               
+               internal bool BufferDeleteQuery(Query query)
+               {
+                       lock (this)
+                       {
+                               WaitReady(null);
+                               AddDeleteQuery(query, numDocsInRAM);
+                               return TimeToFlushDeletes();
+                       }
+               }
+               
+               internal bool DeletesFull()
+               {
+                       lock (this)
+                       {
+                               return (ramBufferSize != IndexWriter.DISABLE_AUTO_FLUSH && (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed + numBytesUsed) >= ramBufferSize)
+                                       || (maxBufferedDeleteTerms != IndexWriter.DISABLE_AUTO_FLUSH && (deletesInRAM.Size() + deletesFlushed.Size()) >= maxBufferedDeleteTerms);
+                       }
+               }
+               
+               internal bool DoApplyDeletes()
+               {
+                       lock (this)
+                       {
+                               // Very similar to DeletesFull(), except we don't count
+                               // numBytesUsed, because we are checking whether
+                               // deletes (alone) are consuming too many resources now
+                               // and thus should be applied.  We apply deletes if RAM
+                               // usage is > 1/2 of our allowed RAM buffer, to prevent
+                               // too-frequent flushing of a long tail of tiny segments
+                               // when merges (which always apply deletes) are
+                               // infrequent.
+                               return (ramBufferSize != IndexWriter.DISABLE_AUTO_FLUSH && (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed) >= ramBufferSize / 2)
+                                       || (maxBufferedDeleteTerms != IndexWriter.DISABLE_AUTO_FLUSH && (deletesInRAM.Size() + deletesFlushed.Size()) >= maxBufferedDeleteTerms);
+                       }
+               }
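+               // Worked example (illustrative): with a 16 MB RAM buffer,
+               // DeletesFull() reports full once buffered deletes plus indexing
+               // RAM reach 16 MB, whereas DoApplyDeletes() fires once the
+               // deletes alone reach 8 MB (half the allowed buffer).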
+               
+               private bool TimeToFlushDeletes()
+               {
+                       lock (this)
+                       {
+                               return (bufferIsFull || DeletesFull()) && SetFlushPending();
+                       }
+               }
+               
+               internal void  SetMaxBufferedDeleteTerms(int maxBufferedDeleteTerms)
+               {
+                       this.maxBufferedDeleteTerms = maxBufferedDeleteTerms;
+               }
+               
+               internal int GetMaxBufferedDeleteTerms()
+               {
+                       return maxBufferedDeleteTerms;
+               }
+               
+               internal bool HasDeletes()
+               {
+                       lock (this)
+                       {
+                               return deletesFlushed.Any();
+                       }
+               }
+               
+               internal bool ApplyDeletes(SegmentInfos infos)
+               {
+                       lock (this)
+                       {
+                               
+                               if (!HasDeletes())
+                                       return false;
+                               
+                               if (infoStream != null)
+                                       Message("apply " + deletesFlushed.numTerms + " buffered deleted terms and " + deletesFlushed.docIDs.Count + " deleted docIDs and " + deletesFlushed.queries.Count + " deleted queries on " + infos.Count + " segments.");
+                               
+                               int infosEnd = infos.Count;
+                               
+                               int docStart = 0;
+                               bool any = false;
+                               for (int i = 0; i < infosEnd; i++)
+                               {
+                                       
+                                       // Make sure we never attempt to apply deletes to
+                                       // segment in external dir
+                                       System.Diagnostics.Debug.Assert(infos.Info(i).dir == directory);
+                                       
+                                       SegmentReader reader = writer.readerPool.Get(infos.Info(i), false);
+                                       try
+                                       {
+                                               any |= ApplyDeletes(reader, docStart);
+                                               docStart += reader.MaxDoc();
+                                       }
+                                       finally
+                                       {
+                                               writer.readerPool.Release(reader);
+                                       }
+                               }
+                               
+                               deletesFlushed.Clear();
+                               
+                               return any;
+                       }
+               }
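+               // Worked example (illustrative): for three segments with maxDoc
+               // 10, 20 and 30, the loop above calls ApplyDeletes(reader,
+               // docStart) with docStart = 0, 10 and 30, mapping the buffered
+               // deletes onto the global docID space in segment order.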
+
+        // used only by assert
+        private Term lastDeleteTerm;
+
+        // used only by assert
+        private bool CheckDeleteTerm(Term term) 
+        {
+            if (term != null)
+            {
+                System.Diagnostics.Debug.Assert(lastDeleteTerm == null || term.CompareTo(lastDeleteTerm) > 0, "lastTerm=" + lastDeleteTerm + " vs term=" + term);
+            }
+            lastDeleteTerm = term;
+            return true;
+        }
+               
+               // Apply buffered delete terms, queries and docIDs to the
+               // provided reader
+               private bool ApplyDeletes(IndexReader reader, int docIDStart)
+               {
+                       lock (this)
+                       {
+                               
+                               int docEnd = docIDStart + reader.MaxDoc();
+                               bool any = false;
+                               
+                System.Diagnostics.Debug.Assert(CheckDeleteTerm(null));
+
+                               // Delete by term
+                //System.Collections.IEnumerator iter = new System.Collections.Hashtable(deletesFlushed.terms).GetEnumerator();
+                               System.Collections.IEnumerator iter = deletesFlushed.terms.GetEnumerator();
+                               TermDocs docs = reader.TermDocs();
+                               try
+                               {
+                                       while (iter.MoveNext())
+                                       {
+                                               System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iter.Current;
+                                               Term term = (Term) entry.Key;
+                                               // LUCENE-2086: we should be iterating a TreeMap here,
+                                               // so the terms had better be in order:
+                        System.Diagnostics.Debug.Assert(CheckDeleteTerm(term));
+                                               docs.Seek(term);
+                                               int limit = ((BufferedDeletes.Num) entry.Value).GetNum();
+                                               while (docs.Next())
+                                               {
+                                                       int docID = docs.Doc();
+                                                       if (docIDStart + docID >= limit)
+                                                               break;
+                                                       reader.DeleteDocument(docID);
+                                                       any = true;
+                                               }
+                                       }
+                               }
+                               finally
+                               {
+                                       docs.Close();
+                               }
+                               
+                               // Delete by docID
+                               iter = deletesFlushed.docIDs.GetEnumerator();
+                               while (iter.MoveNext())
+                               {
+                                       int docID = ((System.Int32) iter.Current);
+                                       if (docID >= docIDStart && docID < docEnd)
+                                       {
+                                               reader.DeleteDocument(docID - docIDStart);
+                                               any = true;
+                                       }
+                               }
+                               
+                               // Delete by query
+                               IndexSearcher searcher = new IndexSearcher(reader);
+                               iter = new System.Collections.Hashtable(deletesFlushed.queries).GetEnumerator();
+                               while (iter.MoveNext())
+                               {
+                                       System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) iter.Current;
+                                       Query query = (Query) entry.Key;
+                                       int limit = ((System.Int32) entry.Value);
+                                       Weight weight = query.Weight(searcher);
+                                       Scorer scorer = weight.Scorer(reader, true, false);
+                                       if (scorer != null)
+                                       {
+                                               while (true)
+                                               {
+                                                       int doc = scorer.NextDoc();
+                                                       if (((long) docIDStart) + doc >= limit)
+                                                               break;
+                                                       reader.DeleteDocument(doc);
+                                                       any = true;
+                                               }
+                                       }
+                               }
+                               searcher.Close();
+                               return any;
+                       }
+               }
+               
+               // Buffer a term in bufferedDeleteTerms, which records the
+               // current number of documents buffered in ram so that the
+               // delete term will be applied to those documents as well
+               // as the disk segments.
+               private void  AddDeleteTerm(Term term, int docCount)
+               {
+                       lock (this)
+                       {
+                               BufferedDeletes.Num num = (BufferedDeletes.Num) deletesInRAM.terms[term];
+                               int docIDUpto = flushedDocCount + docCount;
+                               if (num == null)
+                                       deletesInRAM.terms[term] = new BufferedDeletes.Num(docIDUpto);
+                               else
+                                       num.SetNum(docIDUpto);
+                               deletesInRAM.numTerms++;
+                               
+                               deletesInRAM.AddBytesUsed(BYTES_PER_DEL_TERM + term.text.Length * CHAR_NUM_BYTE);
+                       }
+               }
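+               
+               /* Worked example of the bookkeeping above (illustrative, not from
+               the original sources): if 100 docs have already been flushed and 5
+               more are buffered in RAM when a delete-by-term arrives, docIDUpto is
+               105; when ApplyDeletes later runs, the term only deletes documents
+               whose global docID is below 105, so documents added after the
+               delete call are unaffected. */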
+               
+               // Buffer a specific docID for deletion.  Currently only
+               // used when we hit an exception while adding a document.
+               private void  AddDeleteDocID(int docID)
+               {
+                       lock (this)
+                       {
+                               deletesInRAM.docIDs.Add((System.Int32) (flushedDocCount + docID));
+                               deletesInRAM.AddBytesUsed(BYTES_PER_DEL_DOCID);
+                       }
+               }
+               
+               private void  AddDeleteQuery(Query query, int docID)
+               {
+                       lock (this)
+                       {
+                               deletesInRAM.queries[query] = (System.Int32) (flushedDocCount + docID);
+                               deletesInRAM.AddBytesUsed(BYTES_PER_DEL_QUERY);
+                       }
+               }
+               
+               internal bool DoBalanceRAM()
+               {
+                       lock (this)
+                       {
+                               return ramBufferSize != IndexWriter.DISABLE_AUTO_FLUSH && !bufferIsFull && (numBytesUsed + deletesInRAM.bytesUsed + deletesFlushed.bytesUsed >= ramBufferSize || numBytesAlloc >= freeTrigger);
+                       }
+               }
+               
+               /// <summary>Does the synchronized work to finish/flush the
+               /// inverted document. 
+               /// </summary>
+               private void  FinishDocument(DocumentsWriterThreadState perThread, DocWriter docWriter)
+               {
+                       
+                       if (DoBalanceRAM())
+                       // Must call this w/o holding synchronized(this) else
+                       // we'll hit deadlock:
+                               BalanceRAM();
+                       
+                       lock (this)
+                       {
+                               
+                               System.Diagnostics.Debug.Assert(docWriter == null || docWriter.docID == perThread.docState.docID);
+                               
+                               if (aborting)
+                               {
+                                       
+                                       // We are currently aborting, and another thread is
+                                       // waiting for me to become idle.  We just forcefully
+                                       // idle this threadState; it will be fully reset by
+                                       // abort()
+                                       if (docWriter != null)
+                                               try
+                                               {
+                                                       docWriter.Abort();
+                                               }
+                                               catch (System.Exception)
+                                               {
+                                                       // Ignored: we are aborting anyway; abort() will reset all state.
+                                               }
+                                       
+                                       perThread.isIdle = true;
+                                       System.Threading.Monitor.PulseAll(this);
+                                       return ;
+                               }
+                               
+                               bool doPause;
+                               
+                               if (docWriter != null)
+                                       doPause = waitQueue.Add(docWriter);
+                               else
+                               {
+                                       skipDocWriter.docID = perThread.docState.docID;
+                                       doPause = waitQueue.Add(skipDocWriter);
+                               }
+                               
+                               if (doPause)
+                                       WaitForWaitQueue();
+                               
+                               if (bufferIsFull && !flushPending)
+                               {
+                                       flushPending = true;
+                                       perThread.doFlushAfter = true;
+                               }
+                               
+                               perThread.isIdle = true;
+                               System.Threading.Monitor.PulseAll(this);
+                       }
+               }
+               
+               internal void  WaitForWaitQueue()
+               {
+                       lock (this)
+                       {
+                               do 
+                               {
+                                       try
+                                       {
+                                               System.Threading.Monitor.Wait(this);
+                                       }
+                                       catch (System.Threading.ThreadInterruptedException ie)
+                                       {
+                                               // In 3.0 we will change this to throw
+                                               // InterruptedException instead
+                                               SupportClass.ThreadClass.Current().Interrupt();
+                                               throw new System.SystemException(ie.Message, ie);
+                                       }
+                               }
+                               while (!waitQueue.DoResume());
+                       }
+               }
+               
+               internal class SkipDocWriter:DocWriter
+               {
+                       public override void  Finish()
+                       {
+                       }
+                       public override void  Abort()
+                       {
+                       }
+                       public override long SizeInBytes()
+                       {
+                               return 0;
+                       }
+               }
+               internal SkipDocWriter skipDocWriter;
+               
+               internal long GetRAMUsed()
+               {
+                       return numBytesUsed + deletesInRAM.bytesUsed + deletesFlushed.bytesUsed;
+               }
+               
+               internal long numBytesAlloc;
+               internal long numBytesUsed;
+               
+               internal System.Globalization.NumberFormatInfo nf = System.Globalization.CultureInfo.CurrentCulture.NumberFormat;
+               
+               // Coarse estimates used to measure RAM usage of buffered deletes
+               internal const int OBJECT_HEADER_BYTES = 8;
+               internal static readonly int POINTER_NUM_BYTE;
+               internal const int INT_NUM_BYTE = 4;
+               internal const int CHAR_NUM_BYTE = 2;
+               
+               /* Rough logic: HashMap has an array[Entry] w/ varying
+               load factor (say 2 * POINTER).  Entry is object w/ Term
+               key, BufferedDeletes.Num val, int hash, Entry next
+               (OBJ_HEADER + 3*POINTER + INT).  Term is object w/
+               String field and String text (OBJ_HEADER + 2*POINTER).
+               We don't count Term's field since it's interned.
+               Term's text is String (OBJ_HEADER + 4*INT + POINTER +
+               OBJ_HEADER + string.length*CHAR).  BufferedDeletes.num is
+               OBJ_HEADER + INT. */
+               
+               internal static readonly int BYTES_PER_DEL_TERM = 8 * POINTER_NUM_BYTE + 5 * OBJECT_HEADER_BYTES + 6 * INT_NUM_BYTE;
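+               
+               /* Worked example of the estimate above (illustrative): on a 64-bit
+               JRE, POINTER_NUM_BYTE == 8, so BYTES_PER_DEL_TERM is
+               8*8 + 5*8 + 6*4 = 128 bytes, plus term.text.Length * CHAR_NUM_BYTE
+               for the term text (added in AddDeleteTerm); on 32-bit it is
+               8*4 + 5*8 + 6*4 = 96 bytes. */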
+               
+               /* Rough logic: del docIDs are List<Integer>.  Say list
+               allocates ~2X size (2*POINTER).  Integer is OBJ_HEADER
+               + int */
+               internal static readonly int BYTES_PER_DEL_DOCID = 2 * POINTER_NUM_BYTE + OBJECT_HEADER_BYTES + INT_NUM_BYTE;
+               
+               /* Rough logic: HashMap has an array[Entry] w/ varying
+               load factor (say 2 * POINTER).  Entry is object w/
+               Query key, Integer val, int hash, Entry next
+               (OBJ_HEADER + 3*POINTER + INT).  Query we often
+               undercount (say 24 bytes).  Integer is OBJ_HEADER + INT. */
+               internal static readonly int BYTES_PER_DEL_QUERY = 5 * POINTER_NUM_BYTE + 2 * OBJECT_HEADER_BYTES + 2 * INT_NUM_BYTE + 24;
+               
+               /* Initial chunk size of the shared byte[] blocks used to
+               store postings data */
+               internal const int BYTE_BLOCK_SHIFT = 15;
+               internal static readonly int BYTE_BLOCK_SIZE = 1 << BYTE_BLOCK_SHIFT;
+               internal static readonly int BYTE_BLOCK_MASK = BYTE_BLOCK_SIZE - 1;
+               internal static readonly int BYTE_BLOCK_NOT_MASK = ~ BYTE_BLOCK_MASK;
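+               
+               /* Worked example of the shift/mask arithmetic above (illustrative
+               helper, not part of the original sources): a global byte position in
+               the pool splits into a block index and an intra-block offset. */
+               internal static void DecomposeBytePos(int pos, out int blockIndex, out int blockOffset)
+               {
+                       blockIndex = pos >> BYTE_BLOCK_SHIFT;  // e.g. 70000 >> 15 == 2
+                       blockOffset = pos & BYTE_BLOCK_MASK;   // e.g. 70000 & 32767 == 4464
+               }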
+               
+               internal class ByteBlockAllocator:ByteBlockPool.Allocator
+               {
+            public ByteBlockAllocator(DocumentsWriter enclosingInstance, int blockSize)
+                       {
+                this.blockSize = blockSize;
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(DocumentsWriter enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private DocumentsWriter enclosingInstance;
+                       public DocumentsWriter Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+
+            int blockSize;
+                       internal System.Collections.ArrayList freeByteBlocks = new System.Collections.ArrayList();
+            
+                       /* Allocate another byte[] from the shared pool */
+                       public /*internal*/ override byte[] GetByteBlock(bool trackAllocations)
+                       {
+                               lock (Enclosing_Instance)
+                               {
+                                       int size = freeByteBlocks.Count;
+                                       byte[] b;
+                                       if (0 == size)
+                                       {
+                                               // Always record a block allocated, even if
+                                               // trackAllocations is false.  This is necessary
+                                               // because this block will be shared between
+                                               // things that don't track allocations (term
+                                               // vectors) and things that do (freq/prox
+                                               // postings).
+                        Enclosing_Instance.numBytesAlloc += blockSize;
+                                               b = new byte[blockSize];
+                                       }
+                                       else
+                                       {
+                                               System.Object tempObject;
+                                               tempObject = freeByteBlocks[size - 1];
+                                               freeByteBlocks.RemoveAt(size - 1);
+                                               b = (byte[]) tempObject;
+                                       }
+                                       if (trackAllocations)
+                                               Enclosing_Instance.numBytesUsed += blockSize;
+                                       System.Diagnostics.Debug.Assert(Enclosing_Instance.numBytesUsed <= Enclosing_Instance.numBytesAlloc);
+                                       return b;
+                               }
+                       }
+                       
+                       /* Return byte[]'s to the pool */
+                       public /*internal*/ override void  RecycleByteBlocks(byte[][] blocks, int start, int end)
+                       {
+                               lock (Enclosing_Instance)
+                               {
+                    for (int i = start; i < end; i++)
+                    {
+                        freeByteBlocks.Add(blocks[i]);
+                        blocks[i] = null;
+                    }
+                    if (enclosingInstance.infoStream != null && blockSize != 1024)
+                    {
+                        enclosingInstance.Message("DW.recycleByteBlocks blockSize=" + blockSize + " count=" + (end - start) + " total now " + freeByteBlocks.Count);
+                    }
+                               }
+                       }
+
+            public /*internal*/ override void RecycleByteBlocks(System.Collections.ArrayList blocks)
+            {
+                lock (Enclosing_Instance)
+                {
+                    int size = blocks.Count;
+                    for(int i=0;i<size;i++)
+                        freeByteBlocks.Add(blocks[i]);
+                }
+            }
+               }
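+               
+               /* Illustrative use of the pooled allocator above (not from the
+               original sources): callers rent a block, fill it with postings data,
+               then hand it back, so steady-state indexing reuses the same arrays
+               instead of churning the garbage collector:
+               
+                   byte[] b = byteBlockAllocator.GetByteBlock(true);
+                   // ... write postings into b ...
+                   byteBlockAllocator.RecycleByteBlocks(new byte[][] { b }, 0, 1);
+               */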
+               
+               /* Initial chunk size of the shared int[] blocks used to
+               store postings data */
+               internal const int INT_BLOCK_SHIFT = 13;
+               internal static readonly int INT_BLOCK_SIZE = 1 << INT_BLOCK_SHIFT;
+               internal static readonly int INT_BLOCK_MASK = INT_BLOCK_SIZE - 1;
+               
+               private System.Collections.ArrayList freeIntBlocks = new System.Collections.ArrayList();
+               
+               /* Allocate another int[] from the shared pool */
+               internal int[] GetIntBlock(bool trackAllocations)
+               {
+                       lock (this)
+                       {
+                               int size = freeIntBlocks.Count;
+                               int[] b;
+                               if (0 == size)
+                               {
+                                       // Always record a block allocated, even if
+                                       // trackAllocations is false.  This is necessary
+                                       // because this block will be shared between
+                                       // things that don't track allocations (term
+                                       // vectors) and things that do (freq/prox
+                                       // postings).
+                                       numBytesAlloc += INT_BLOCK_SIZE * INT_NUM_BYTE;
+                                       b = new int[INT_BLOCK_SIZE];
+                               }
+                               else
+                               {
+                                       System.Object tempObject;
+                                       tempObject = freeIntBlocks[size - 1];
+                                       freeIntBlocks.RemoveAt(size - 1);
+                                       b = (int[]) tempObject;
+                               }
+                               if (trackAllocations)
+                                       numBytesUsed += INT_BLOCK_SIZE * INT_NUM_BYTE;
+                               System.Diagnostics.Debug.Assert(numBytesUsed <= numBytesAlloc);
+                               return b;
+                       }
+               }
+               
+               internal void  BytesAllocated(long numBytes)
+               {
+                       lock (this)
+                       {
+                               numBytesAlloc += numBytes;
+                       }
+               }
+               
+               internal void  BytesUsed(long numBytes)
+               {
+                       lock (this)
+                       {
+                               numBytesUsed += numBytes;
+                               System.Diagnostics.Debug.Assert(numBytesUsed <= numBytesAlloc);
+                       }
+               }
+               
+               /* Return int[]s to the pool */
+               internal void  RecycleIntBlocks(int[][] blocks, int start, int end)
+               {
+                       lock (this)
+                       {
+                for (int i = start; i < end; i++)
+                {
+                    freeIntBlocks.Add(blocks[i]);
+                    blocks[i] = null;
+                }
+                if (infoStream != null)
+                {
+                    Message("DW.recycleIntBlocks count=" + (end - start) + " total now " + freeIntBlocks.Count);
+                }
+                       }
+               }
+               
+               internal ByteBlockAllocator byteBlockAllocator;
+
+        internal static int PER_DOC_BLOCK_SIZE = 1024;
+
+        ByteBlockAllocator perDocAllocator;
+               
+               /* Initial chunk size of the shared char[] blocks used to
+               store term text */
+               internal const int CHAR_BLOCK_SHIFT = 14;
+               internal static readonly int CHAR_BLOCK_SIZE = 1 << CHAR_BLOCK_SHIFT;
+               internal static readonly int CHAR_BLOCK_MASK = CHAR_BLOCK_SIZE - 1;
+               
+               internal static readonly int MAX_TERM_LENGTH = CHAR_BLOCK_SIZE - 1;
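+               
+               /* Note (illustrative): CHAR_BLOCK_SIZE is 1 << 14 == 16384 chars, so
+               MAX_TERM_LENGTH is 16383; a buffered term must fit in a single
+               shared char[] block, with one char effectively reserved as an
+               end-of-term marker. */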
+               
+               private System.Collections.ArrayList freeCharBlocks = new System.Collections.ArrayList();
+               
+               /* Allocate another char[] from the shared pool */
+               internal char[] GetCharBlock()
+               {
+                       lock (this)
+                       {
+                               int size = freeCharBlocks.Count;
+                               char[] c;
+                               if (0 == size)
+                               {
+                                       numBytesAlloc += CHAR_BLOCK_SIZE * CHAR_NUM_BYTE;
+                                       c = new char[CHAR_BLOCK_SIZE];
+                               }
+                               else
+                               {
+                                       System.Object tempObject;
+                                       tempObject = freeCharBlocks[size - 1];
+                                       freeCharBlocks.RemoveAt(size - 1);
+                                       c = (char[]) tempObject;
+                               }
+                               // We always track allocations of char blocks, for now,
+                               // because nothing that skips allocation tracking
+                               // (currently only term vectors) uses its own char
+                               // blocks.
+                               numBytesUsed += CHAR_BLOCK_SIZE * CHAR_NUM_BYTE;
+                               System.Diagnostics.Debug.Assert(numBytesUsed <= numBytesAlloc);
+                               return c;
+                       }
+               }
+               
+               /* Return char[]s to the pool */
+               internal void  RecycleCharBlocks(char[][] blocks, int numBlocks)
+               {
+                       lock (this)
+                       {
+                for (int i = 0; i < numBlocks; i++)
+                {
+                    freeCharBlocks.Add(blocks[i]);
+                    blocks[i] = null;
+                }
+                if (infoStream != null)
+                {
+                    Message("DW.recycleCharBlocks count=" + numBlocks + " total now " + freeCharBlocks.Count);
+                }
+                       }
+               }
+               
+               internal System.String ToMB(long v)
+               {
+                       return System.String.Format(nf, "{0:f}", new System.Object[] { (v / 1024F / 1024F) });
+               }
+
+
+        /* We have four pools of RAM: postings, byte blocks (which
+        * hold freq/prox posting data), char blocks (which hold the
+        * characters in each term) and per-doc buffers (stored
+        * fields/term vectors).  Different docs require varying
+        * amounts of storage from these four pools.
+        * 
+        * For example, docs with many unique single-occurrence short
+        * terms will use up the postings RAM and hardly any of the
+        * others, whereas docs with very large terms will use a lot
+        * of char block RAM and relatively little of the others.
+        * This method just frees allocations from the pools once we
+        * are over-budget, which balances the pools to match the
+        * current docs. */
+               internal void  BalanceRAM()
+               {
+                       
+                       // We flush when we've used our target usage
+                       long flushTrigger = ramBufferSize;
+                       
+                       long deletesRAMUsed = deletesInRAM.bytesUsed + deletesFlushed.bytesUsed;
+                       
+                       if (numBytesAlloc + deletesRAMUsed > freeTrigger)
+                       {
+                               
+                               if (infoStream != null)
+                                       Message(
+                        "  RAM: now balance allocations: usedMB=" + ToMB(numBytesUsed) + 
+                        " vs trigger=" + ToMB(flushTrigger) + 
+                        " allocMB=" + ToMB(numBytesAlloc) + 
+                        " deletesMB=" + ToMB(deletesRAMUsed) + 
+                        " vs trigger=" + ToMB(freeTrigger) + 
+                        " byteBlockFree=" + ToMB(byteBlockAllocator.freeByteBlocks.Count * BYTE_BLOCK_SIZE) +
+                        " perDocFree=" + ToMB(perDocAllocator.freeByteBlocks.Count * PER_DOC_BLOCK_SIZE) +
+                        " charBlockFree=" + ToMB(freeCharBlocks.Count * CHAR_BLOCK_SIZE * CHAR_NUM_BYTE));
+                               
+                               long startBytesAlloc = numBytesAlloc + deletesRAMUsed;
+                               
+                               int iter = 0;
+                               
+                               // We free equally from each pool in 32 KB
+                               // chunks until we are below our threshold
+                               // (freeLevel)
+                               
+                               bool any = true;
+                               
+                               while (numBytesAlloc + deletesRAMUsed > freeLevel)
+                               {
+                                       
+                                       lock (this)
+                                       {
+                        if (0 == perDocAllocator.freeByteBlocks.Count
+                              && 0 == byteBlockAllocator.freeByteBlocks.Count
+                              && 0 == freeCharBlocks.Count
+                              && 0 == freeIntBlocks.Count
+                              && !any)
+                                               {
+                                                       // Nothing else to free -- must flush now.
+                                                       bufferIsFull = numBytesUsed + deletesRAMUsed > flushTrigger;
+                                                       if (infoStream != null)
+                                                       {
+                                if (bufferIsFull)
+                                                                       Message("    nothing to free; now set bufferIsFull");
+                                                               else
+                                                                       Message("    nothing to free");
+                                                       }
+                                                       System.Diagnostics.Debug.Assert(numBytesUsed <= numBytesAlloc);
+                                                       break;
+                                               }
+                                               
+                                               if ((0 == iter % 5) && byteBlockAllocator.freeByteBlocks.Count > 0)
+                                               {
+                                                       byteBlockAllocator.freeByteBlocks.RemoveAt(byteBlockAllocator.freeByteBlocks.Count - 1);
+                                                       numBytesAlloc -= BYTE_BLOCK_SIZE;
+                                               }
+                                               
+                                               if ((1 == iter % 5) && freeCharBlocks.Count > 0)
+                                               {
+                                                       freeCharBlocks.RemoveAt(freeCharBlocks.Count - 1);
+                                                       numBytesAlloc -= CHAR_BLOCK_SIZE * CHAR_NUM_BYTE;
+                                               }
+                                               
+                                               if ((2 == iter % 5) && freeIntBlocks.Count > 0)
+                                               {
+                                                       freeIntBlocks.RemoveAt(freeIntBlocks.Count - 1);
+                                                       numBytesAlloc -= INT_BLOCK_SIZE * INT_NUM_BYTE;
+                                               }
+
+                        if ((3 == iter % 5) && perDocAllocator.freeByteBlocks.Count > 0)
+                        {
+                            // Remove up to 32 blocks (each block is 1K)
+                            for (int i = 0; i < 32; ++i)
+                            {
+                                perDocAllocator.freeByteBlocks.RemoveAt(perDocAllocator.freeByteBlocks.Count - 1);
+                                numBytesAlloc -= PER_DOC_BLOCK_SIZE;
+                                if (perDocAllocator.freeByteBlocks.Count == 0)
+                                {
+                                    break;
+                                }
+                            }
+                        }
+                                       }
+                                       
+                                       if ((4 == iter % 5) && any)
+                                       // Ask consumer to free any recycled state
+                                               any = consumer.FreeRAM();
+                                       
+                                       iter++;
+                               }
+                               
+                               if (infoStream != null)
+                                       Message(System.String.Format(nf, "    after free: freedMB={0:f} usedMB={1:f} allocMB={2:f}",
+                                               new System.Object[] { ((startBytesAlloc - numBytesAlloc) / 1024.0 / 1024.0), (numBytesUsed / 1024.0 / 1024.0), (numBytesAlloc / 1024.0 / 1024.0) }));
+            }
+                       else
+                       {
+                               // If we have not crossed the 100% mark, but have
+                               // crossed the 95% mark of RAM we are actually
+                               // using, go ahead and flush.  This prevents
+                               // over-allocating and then freeing with every
+                               // flush.
+                               lock (this)
+                               {
+                                       
+                                       if (numBytesUsed + deletesRAMUsed > flushTrigger)
+                                       {
+                                               if (infoStream != null)
+                                                       Message(System.String.Format(nf, "  RAM: now flush @ usedMB={0:f} allocMB={1:f} triggerMB={2:f}",
+                                                               new object[] { (numBytesUsed / 1024.0 / 1024.0), (numBytesAlloc / 1024.0 / 1024.0), (flushTrigger / 1024.0 / 1024.0) }));
+                                               
+                                               bufferIsFull = true;
+                                       }
+                               }
+                       }
+               }
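+               
+               /* Worked example (illustrative; freeTrigger and freeLevel are set
+               elsewhere in this class, conventionally around 105% and 95% of
+               ramBufferSize): with a 16 MB buffer, crossing ~16.8 MB of
+               allocation starts the loop above, which drops 32 KB per iteration
+               round-robin from the byte, char, int and per-doc pools until
+               allocation falls below ~15.2 MB; if the pools empty first,
+               bufferIsFull is set and the next flush reclaims the RAM. */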
+               
+               internal WaitQueue waitQueue;
+               
+               internal class WaitQueue
+               {
+                       private void  InitBlock(DocumentsWriter enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private DocumentsWriter enclosingInstance;
+                       public DocumentsWriter Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal DocWriter[] waiting;
+                       internal int nextWriteDocID;
+                       internal int nextWriteLoc;
+                       internal int numWaiting;
+                       internal long waitingBytes;
+                       
+                       public WaitQueue(DocumentsWriter enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                               waiting = new DocWriter[10];
+                       }
+                       
+                       internal void  Reset()
+                       {
+                               lock (this)
+                               {
+                                       // NOTE: nextWriteLoc doesn't need to be reset
+                                       System.Diagnostics.Debug.Assert(numWaiting == 0);
+                                       System.Diagnostics.Debug.Assert(waitingBytes == 0);
+                                       nextWriteDocID = 0;
+                               }
+                       }
+                       
+                       internal bool DoResume()
+                       {
+                               lock (this)
+                               {
+                                       return waitingBytes <= Enclosing_Instance.waitQueueResumeBytes;
+                               }
+                       }
+                       
+                       internal bool DoPause()
+                       {
+                               lock (this)
+                               {
+                                       return waitingBytes > Enclosing_Instance.waitQueuePauseBytes;
+                               }
+                       }
+                       
+                       internal void  Abort()
+                       {
+                               lock (this)
+                               {
+                                       int count = 0;
+                                       for (int i = 0; i < waiting.Length; i++)
+                                       {
+                                               DocWriter doc = waiting[i];
+                                               if (doc != null)
+                                               {
+                                                       doc.Abort();
+                                                       waiting[i] = null;
+                                                       count++;
+                                               }
+                                       }
+                                       waitingBytes = 0;
+                                       System.Diagnostics.Debug.Assert(count == numWaiting);
+                                       numWaiting = 0;
+                               }
+                       }
+                       
+                       private void  WriteDocument(DocWriter doc)
+                       {
+                System.Diagnostics.Debug.Assert(doc == Enclosing_Instance.skipDocWriter || nextWriteDocID == doc.docID);
+                               bool success = false;
+                               try
+                               {
+                                       doc.Finish();
+                                       nextWriteDocID++;
+                                       Enclosing_Instance.numDocsInStore++;
+                                       nextWriteLoc++;
+                                       System.Diagnostics.Debug.Assert(nextWriteLoc <= waiting.Length);
+                                       if (nextWriteLoc == waiting.Length)
+                                               nextWriteLoc = 0;
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       if (!success)
+                                               Enclosing_Instance.SetAborting();
+                               }
+                       }
+                       
+                       public bool Add(DocWriter doc)
+                       {
+                               lock (this)
+                               {
+                                       
+                                       System.Diagnostics.Debug.Assert(doc.docID >= nextWriteDocID);
+                                       
+                                       if (doc.docID == nextWriteDocID)
+                                       {
+                                               WriteDocument(doc);
+                                               while (true)
+                                               {
+                                                       doc = waiting[nextWriteLoc];
+                                                       if (doc != null)
+                                                       {
+                                                               numWaiting--;
+                                                               waiting[nextWriteLoc] = null;
+                                                               waitingBytes -= doc.SizeInBytes();
+                                                               WriteDocument(doc);
+                                                       }
+                                                       else
+                                                               break;
+                                               }
+                                       }
+                                       else
+                                       {
+                                               
+                                               // I finished before documents that were added
+                                               // before me.  This can easily happen when I am a
+                                               // small doc and the docs before me were large, or
+                                               // simply due to luck in the thread scheduling.  Just
+                                               // add myself to the queue and when that large doc
+                                               // finishes, it will flush me:
+                                               int gap = doc.docID - nextWriteDocID;
+                                               if (gap >= waiting.Length)
+                                               {
+                                                       // Grow queue
+                                                       DocWriter[] newArray = new DocWriter[ArrayUtil.GetNextSize(gap)];
+                                                       System.Diagnostics.Debug.Assert(nextWriteLoc >= 0);
+                                                       Array.Copy(waiting, nextWriteLoc, newArray, 0, waiting.Length - nextWriteLoc);
+                                                       Array.Copy(waiting, 0, newArray, waiting.Length - nextWriteLoc, nextWriteLoc);
+                                                       nextWriteLoc = 0;
+                                                       waiting = newArray;
+                                                       gap = doc.docID - nextWriteDocID;
+                                               }
+                                               
+                                               int loc = nextWriteLoc + gap;
+                                               if (loc >= waiting.Length)
+                                                       loc -= waiting.Length;
+                                               
+                                               // We should only wrap one time
+                                               System.Diagnostics.Debug.Assert(loc < waiting.Length);
+                                               
+                                               // Nobody should be in my spot!
+                                               System.Diagnostics.Debug.Assert(waiting [loc] == null);
+                                               waiting[loc] = doc;
+                                               numWaiting++;
+                                               waitingBytes += doc.SizeInBytes();
+                                       }
+                                       
+                                       return DoPause();
+                               }
+                       }
+               }
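+               
+               /* Illustrative WaitQueue scenario (not from the original sources):
+               threads index docIDs 0, 1 and 2 concurrently and doc 1 (a small
+               doc) finishes first.  Add(doc1) parks it in waiting[] because
+               nextWriteDocID is still 0; when doc 0 finishes, Add(doc0) writes
+               it immediately and the drain loop then writes the parked doc 1,
+               so documents always reach the store in docID order. */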
+               static DocumentsWriter()
+               {
+                       DefaultIndexingChain = new AnonymousClassIndexingChain();
+                       POINTER_NUM_BYTE = Constants.JRE_IS_64BIT ? 8 : 4;
+               }
+
+        public static int BYTE_BLOCK_SIZE_ForNUnit
+        {
+            get { return BYTE_BLOCK_SIZE; }
+        }
+
+        public static int CHAR_BLOCK_SIZE_ForNUnit
+        {
+            get { return CHAR_BLOCK_SIZE; }
+        }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocumentsWriterThreadState.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/DocumentsWriterThreadState.cs
new file mode 100644 (file)
index 0000000..002fa1f
--- /dev/null
@@ -0,0 +1,57 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>Used by DocumentsWriter to maintain per-thread state.
+       /// We keep a separate Posting hash and other state for each
+       /// thread and then merge postings hashes from all threads
+       /// when writing the segment. 
+       /// </summary>
+       sealed class DocumentsWriterThreadState
+       {
+               
+               internal bool isIdle = true; // false if this is currently in use by a thread
+               internal int numThreads = 1; // Number of threads that share this instance
+               internal bool doFlushAfter; // true if we should flush after processing current doc
+               internal DocConsumerPerThread consumer;
+               internal DocumentsWriter.DocState docState;
+               
+               internal DocumentsWriter docWriter;
+               
+               public DocumentsWriterThreadState(DocumentsWriter docWriter)
+               {
+                       this.docWriter = docWriter;
+                       docState = new DocumentsWriter.DocState();
+                       docState.maxFieldLength = docWriter.maxFieldLength;
+                       docState.infoStream = docWriter.infoStream;
+                       docState.similarity = docWriter.similarity;
+                       docState.docWriter = docWriter;
+                       docState.allowMinus1Position = docWriter.writer.GetAllowMinus1Position();
+                       consumer = docWriter.consumer.AddThread(this);
+               }
+               
+               internal void  DoAfterFlush()
+               {
+                       numThreads = 0;
+                       doFlushAfter = false;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldInfo.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldInfo.cs
new file mode 100644 (file)
index 0000000..78f27ea
--- /dev/null
@@ -0,0 +1,136 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       public sealed class FieldInfo : System.ICloneable
+       {
+               internal System.String name;
+               internal bool isIndexed;
+               internal int number;
+               
+               // true if term vector for this field should be stored
+               internal bool storeTermVector;
+               internal bool storeOffsetWithTermVector;
+               internal bool storePositionWithTermVector;
+               
+               internal bool omitNorms; // omit norms associated with indexed fields  
+               internal bool omitTermFreqAndPositions;
+               
+               internal bool storePayloads; // whether this field stores payloads together with term positions
+               
+               internal FieldInfo(System.String na, bool tk, int nu, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool omitNorms, bool storePayloads, bool omitTermFreqAndPositions)
+               {
+                       name = na;
+                       isIndexed = tk;
+                       number = nu;
+                       if (isIndexed)
+                       {
+                               this.storeTermVector = storeTermVector;
+                               this.storeOffsetWithTermVector = storeOffsetWithTermVector;
+                               this.storePositionWithTermVector = storePositionWithTermVector;
+                               this.storePayloads = storePayloads;
+                               this.omitNorms = omitNorms;
+                               this.omitTermFreqAndPositions = omitTermFreqAndPositions;
+                       }
+                       else
+                       {
+                               // for non-indexed fields, leave defaults
+                               this.storeTermVector = false;
+                               this.storeOffsetWithTermVector = false;
+                               this.storePositionWithTermVector = false;
+                               this.storePayloads = false;
+                               this.omitNorms = true;
+                               this.omitTermFreqAndPositions = false;
+                       }
+               }
+               
+               public System.Object Clone()
+               {
+                       return new FieldInfo(name, isIndexed, number, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
+               }
+               
+               internal void  Update(bool isIndexed, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool omitNorms, bool storePayloads, bool omitTermFreqAndPositions)
+               {
+                       if (this.isIndexed != isIndexed)
+                       {
+                               this.isIndexed = true; // once indexed, always indexed
+                       }
+                       if (isIndexed)
+                       {
+                               // if updated field data is not for indexing, leave the updates out
+                               if (this.storeTermVector != storeTermVector)
+                               {
+                                       this.storeTermVector = true; // once vector, always vector
+                               }
+                               if (this.storePositionWithTermVector != storePositionWithTermVector)
+                               {
+                                       this.storePositionWithTermVector = true; // once vector, always vector
+                               }
+                               if (this.storeOffsetWithTermVector != storeOffsetWithTermVector)
+                               {
+                                       this.storeOffsetWithTermVector = true; // once vector, always vector
+                               }
+                               if (this.storePayloads != storePayloads)
+                               {
+                                       this.storePayloads = true;
+                               }
+                               if (this.omitNorms != omitNorms)
+                               {
+                                       this.omitNorms = false; // once norms are stored, always store
+                               }
+                               if (this.omitTermFreqAndPositions != omitTermFreqAndPositions)
+                               {
+                                       this.omitTermFreqAndPositions = true; // once omitTermFreqAndPositions is required, term freqs/positions stay off for the life of the field
+                               }
+                       }
+               }
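+
+               /* Illustrative merge example (not from the original sources): if
+               segment A saw field "body" indexed with term vectors and segment B
+               saw it indexed without them, Update leaves storeTermVector true.
+               Most flags stick toward storing more (the vector/payload flags
+               stick to true, omitNorms to false), while omitTermFreqAndPositions
+               sticks to true: freq/prox data, once omitted for the field, cannot
+               be recovered. */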
+
+        public bool storePayloads_ForNUnit
+        {
+            get { return storePayloads; }
+        }
+
+        public System.String name_ForNUnit
+        {
+            get { return name; }
+        }
+
+        public bool isIndexed_ForNUnit
+        {
+            get { return isIndexed; }
+        }
+
+        public bool omitNorms_ForNUnit
+        {
+            get { return omitNorms; }
+        }
+
+        public bool omitTermFreqAndPositions_ForNUnit
+        {
+            get { return omitTermFreqAndPositions; }
+        }
+
+        public bool storeTermVector_ForNUnit
+        {
+            get { return storeTermVector; }
+        }
+    }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldInfos.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldInfos.cs
new file mode 100644 (file)
index 0000000..1780be4
--- /dev/null
@@ -0,0 +1,486 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Document = Mono.Lucene.Net.Documents.Document;
+using Fieldable = Mono.Lucene.Net.Documents.Fieldable;
+using Directory = Mono.Lucene.Net.Store.Directory;
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+using StringHelper = Mono.Lucene.Net.Util.StringHelper;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>Access to the Fieldable Info file that describes document fields and whether or
+       /// not they are indexed. Each segment has a separate Fieldable Info file. Objects
+       /// of this class are thread-safe for multiple readers, but only one thread can
+       /// be adding documents at a time, with no other reader or writer threads
+       /// accessing this object.
+       /// </summary>
+       public sealed class FieldInfos : System.ICloneable
+       {
+               
+               // Used internally (ie not written to *.fnm files) for pre-2.9 files
+               public const int FORMAT_PRE = - 1;
+               
+               // First used in 2.9; prior to 2.9 there was no format header
+               public const int FORMAT_START = - 2;
+               
+               internal static readonly int CURRENT_FORMAT = FORMAT_START;
+               
+               internal const byte IS_INDEXED = (byte) (0x1);
+               internal const byte STORE_TERMVECTOR = (byte) (0x2);
+               internal const byte STORE_POSITIONS_WITH_TERMVECTOR = (byte) (0x4);
+               internal const byte STORE_OFFSET_WITH_TERMVECTOR = (byte) (0x8);
+               internal const byte OMIT_NORMS = (byte) (0x10);
+               internal const byte STORE_PAYLOADS = (byte) (0x20);
+               internal const byte OMIT_TERM_FREQ_AND_POSITIONS = (byte) (0x40);
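+               
+               /* Example (illustrative): an indexed field whose term vectors also
+               store positions is written to the .fnm file with the flag byte
+               IS_INDEXED | STORE_TERMVECTOR | STORE_POSITIONS_WITH_TERMVECTOR,
+               i.e. (byte) 0x7. */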
+               
+               private System.Collections.ArrayList byNumber = new System.Collections.ArrayList();
+               private System.Collections.Hashtable byName = new System.Collections.Hashtable();
+               private int format;
+               
+               public /*internal*/ FieldInfos()
+               {
+               }
+               
+               /// <summary> Construct a FieldInfos object using the directory and the name of
+               /// the file to open the IndexInput from
+               /// </summary>
+               /// <param name="d">The directory to open the IndexInput from
+               /// </param>
+               /// <param name="name">The name of the file to open the IndexInput from in the Directory
+               /// </param>
+               /// <throws>  IOException </throws>
+               public /*internal*/ FieldInfos(Directory d, System.String name)
+               {
+                       IndexInput input = d.OpenInput(name);
+                       try
+                       {
+                               try
+                               {
+                                       Read(input, name);
+                               }
+                               catch (System.IO.IOException ioe)
+                               {
+                                       if (format == FORMAT_PRE)
+                                       {
+                                               // LUCENE-1623: FORMAT_PRE (before there was a
+                                               // format) may be 2.3.2 (pre-utf8) or 2.4.x (utf8)
+                                               // encoding; retry with input set to pre-utf8
+                                               input.Seek(0);
+                                               input.SetModifiedUTF8StringsMode();
+                                               byNumber.Clear();
+                                               byName.Clear();
+                                               try
+                                               {
+                                                       Read(input, name);
+                                               }
+                                               catch (System.Exception)
+                                               {
+                                                       // Ignore any new exception & throw original IOE
+                                                       throw ioe;
+                                               }
+                                       }
+                                       else
+                                       {
+                                               // The IOException cannot be caused by
+                                               // LUCENE-1623, so re-throw it
+                                               throw ioe;
+                                       }
+                               }
+                       }
+                       finally
+                       {
+                               input.Close();
+                       }
+               }
+               
+               /// <summary> Returns a deep clone of this FieldInfos instance.</summary>
+               public System.Object Clone()
+               {
+            lock (this)
+            {
+                FieldInfos fis = new FieldInfos();
+                int numField = byNumber.Count;
+                for (int i = 0; i < numField; i++)
+                {
+                    FieldInfo fi = (FieldInfo)((FieldInfo)byNumber[i]).Clone();
+                    fis.byNumber.Add(fi);
+                    fis.byName[fi.name] = fi;
+                }
+                return fis;
+            }
+               }
+               
+               /// <summary>Adds field info for a Document. </summary>
+               public void  Add(Document doc)
+               {
+                       lock (this)
+                       {
+                               System.Collections.IList fields = doc.GetFields();
+                               System.Collections.IEnumerator fieldIterator = fields.GetEnumerator();
+                               while (fieldIterator.MoveNext())
+                               {
+                                       Fieldable field = (Fieldable) fieldIterator.Current;
+                                       Add(field.Name(), field.IsIndexed(), field.IsTermVectorStored(), field.IsStorePositionWithTermVector(), field.IsStoreOffsetWithTermVector(), field.GetOmitNorms(), false, field.GetOmitTf());
+                               }
+                       }
+               }
+               
+		/// <summary>Returns true if any indexed field does not omit term frequencies and positions. </summary>
+               internal bool HasProx()
+               {
+                       int numFields = byNumber.Count;
+                       for (int i = 0; i < numFields; i++)
+                       {
+                               FieldInfo fi = FieldInfo(i);
+                               if (fi.isIndexed && !fi.omitTermFreqAndPositions)
+                               {
+                                       return true;
+                               }
+                       }
+                       return false;
+               }
+               
+		/// <summary> Add fields that are indexed. Whether they have term vectors has to be specified.
+               /// 
+               /// </summary>
+               /// <param name="names">The names of the fields
+               /// </param>
+               /// <param name="storeTermVectors">Whether the fields store term vectors or not
+               /// </param>
+               /// <param name="storePositionWithTermVector">true if positions should be stored.
+               /// </param>
+               /// <param name="storeOffsetWithTermVector">true if offsets should be stored
+               /// </param>
+               public void  AddIndexed(System.Collections.ICollection names, bool storeTermVectors, bool storePositionWithTermVector, bool storeOffsetWithTermVector)
+               {
+                       lock (this)
+                       {
+                               System.Collections.IEnumerator i = names.GetEnumerator();
+                               while (i.MoveNext())
+                               {
+                                       Add((System.String) i.Current, true, storeTermVectors, storePositionWithTermVector, storeOffsetWithTermVector);
+                               }
+                       }
+               }
+               
+               /// <summary> Assumes the fields are not storing term vectors.
+               /// 
+               /// </summary>
+               /// <param name="names">The names of the fields
+               /// </param>
+               /// <param name="isIndexed">Whether the fields are indexed or not
+               /// 
+               /// </param>
+		/// <seealso cref="Add(String, bool)">
+               /// </seealso>
+		public void Add(System.Collections.Generic.ICollection<string> names, bool isIndexed)
+               {
+                       lock (this)
+                       {
+                               System.Collections.IEnumerator i = names.GetEnumerator();
+                               while (i.MoveNext())
+                               {
+                                       Add((System.String) i.Current, isIndexed);
+                               }
+                       }
+               }
+               
+		/// <summary> Equivalent to calling the five-parameter Add with false for all TermVector parameters.
+               /// 
+               /// </summary>
+               /// <param name="name">The name of the Fieldable
+               /// </param>
+               /// <param name="isIndexed">true if the field is indexed
+               /// </param>
+		/// <seealso cref="Add(String, bool, bool, bool, bool)">
+               /// </seealso>
+               public void  Add(System.String name, bool isIndexed)
+               {
+                       lock (this)
+                       {
+                               Add(name, isIndexed, false, false, false, false);
+                       }
+               }
+               
+		/// <summary> Equivalent to calling the five-parameter Add with false for term vector positions and offsets.
+               /// 
+               /// </summary>
+               /// <param name="name">The name of the field
+               /// </param>
+               /// <param name="isIndexed"> true if the field is indexed
+               /// </param>
+               /// <param name="storeTermVector">true if the term vector should be stored
+               /// </param>
+               public void  Add(System.String name, bool isIndexed, bool storeTermVector)
+               {
+                       lock (this)
+                       {
+                               Add(name, isIndexed, storeTermVector, false, false, false);
+                       }
+               }
+               
+               /// <summary>If the field is not yet known, adds it. If it is known, checks to make
+               /// sure that the isIndexed flag is the same as was given previously for this
+		/// field. If not, marks it as being indexed.  The same applies to the TermVector
+               /// parameters.
+               /// 
+               /// </summary>
+               /// <param name="name">The name of the field
+               /// </param>
+               /// <param name="isIndexed">true if the field is indexed
+               /// </param>
+               /// <param name="storeTermVector">true if the term vector should be stored
+               /// </param>
+               /// <param name="storePositionWithTermVector">true if the term vector with positions should be stored
+               /// </param>
+               /// <param name="storeOffsetWithTermVector">true if the term vector with offsets should be stored
+               /// </param>
+               public void  Add(System.String name, bool isIndexed, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector)
+               {
+                       lock (this)
+                       {
+                               
+                               Add(name, isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, false);
+                       }
+               }
+               
+               /// <summary>If the field is not yet known, adds it. If it is known, checks to make
+               /// sure that the isIndexed flag is the same as was given previously for this
+		/// field. If not, marks it as being indexed.  The same applies to the TermVector
+               /// parameters.
+               /// 
+               /// </summary>
+               /// <param name="name">The name of the field
+               /// </param>
+               /// <param name="isIndexed">true if the field is indexed
+               /// </param>
+               /// <param name="storeTermVector">true if the term vector should be stored
+               /// </param>
+               /// <param name="storePositionWithTermVector">true if the term vector with positions should be stored
+               /// </param>
+               /// <param name="storeOffsetWithTermVector">true if the term vector with offsets should be stored
+               /// </param>
+               /// <param name="omitNorms">true if the norms for the indexed field should be omitted
+               /// </param>
+               public void  Add(System.String name, bool isIndexed, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool omitNorms)
+               {
+                       lock (this)
+                       {
+                               Add(name, isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, false, false);
+                       }
+               }
+               
+               /// <summary>If the field is not yet known, adds it. If it is known, checks to make
+               /// sure that the isIndexed flag is the same as was given previously for this
+		/// field. If not, marks it as being indexed.  The same applies to the TermVector
+               /// parameters.
+               /// 
+               /// </summary>
+               /// <param name="name">The name of the field
+               /// </param>
+               /// <param name="isIndexed">true if the field is indexed
+               /// </param>
+               /// <param name="storeTermVector">true if the term vector should be stored
+               /// </param>
+               /// <param name="storePositionWithTermVector">true if the term vector with positions should be stored
+               /// </param>
+               /// <param name="storeOffsetWithTermVector">true if the term vector with offsets should be stored
+               /// </param>
+               /// <param name="omitNorms">true if the norms for the indexed field should be omitted
+               /// </param>
+               /// <param name="storePayloads">true if payloads should be stored for this field
+               /// </param>
+               /// <param name="omitTermFreqAndPositions">true if term freqs should be omitted for this field
+               /// </param>
+               public FieldInfo Add(System.String name, bool isIndexed, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool omitNorms, bool storePayloads, bool omitTermFreqAndPositions)
+               {
+                       lock (this)
+                       {
+                               FieldInfo fi = FieldInfo(name);
+                               if (fi == null)
+                               {
+                                       return AddInternal(name, isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
+                               }
+                               else
+                               {
+                                       fi.Update(isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
+                               }
+                               return fi;
+                       }
+               }
+               
+               private FieldInfo AddInternal(System.String name, bool isIndexed, bool storeTermVector, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool omitNorms, bool storePayloads, bool omitTermFreqAndPositions)
+               {
+                       name = StringHelper.Intern(name);
+                       FieldInfo fi = new FieldInfo(name, isIndexed, byNumber.Count, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
+                       byNumber.Add(fi);
+                       byName[name] = fi;
+                       return fi;
+               }
+               
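+		/// <summary> Returns the number of the named field, or -1 when the field
+		/// does not exist.</summary>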
+               public int FieldNumber(System.String fieldName)
+               {
+                       FieldInfo fi = FieldInfo(fieldName);
+                       return (fi != null)?fi.number:- 1;
+               }
+               
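+		/// <summary> Returns the FieldInfo for <code>fieldName</code>, or null when
+		/// the field does not exist.</summary>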
+               public FieldInfo FieldInfo(System.String fieldName)
+               {
+                       return (FieldInfo) byName[fieldName];
+               }
+               
+               /// <summary> Return the fieldName identified by its number.
+               /// 
+               /// </summary>
+               /// <param name="fieldNumber">
+               /// </param>
+               /// <returns> the fieldName or an empty string when the field
+               /// with the given number doesn't exist.
+               /// </returns>
+               public System.String FieldName(int fieldNumber)
+               {
+                       FieldInfo fi = FieldInfo(fieldNumber);
+                       return (fi != null)?fi.name:"";
+               }
+               
+               /// <summary> Return the fieldinfo object referenced by the fieldNumber.</summary>
+               /// <param name="fieldNumber">
+               /// </param>
+               /// <returns> the FieldInfo object or null when the given fieldNumber
+               /// doesn't exist.
+               /// </returns>
+               public FieldInfo FieldInfo(int fieldNumber)
+               {
+                       return (fieldNumber >= 0)?(FieldInfo) byNumber[fieldNumber]:null;
+               }
+               
+               public int Size()
+               {
+                       return byNumber.Count;
+               }
+               
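+		/// <summary> Returns true if at least one field stores term vectors.</summary>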
+               public bool HasVectors()
+               {
+                       bool hasVectors = false;
+                       for (int i = 0; i < Size(); i++)
+                       {
+                               if (FieldInfo(i).storeTermVector)
+                               {
+                                       hasVectors = true;
+                                       break;
+                               }
+                       }
+                       return hasVectors;
+               }
+               
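+		/// <summary> Writes this FieldInfos to the file <code>name</code> in directory
+		/// <code>d</code>, closing the output even when Write fails. A hedged
+		/// round-trip sketch, assuming an open Directory <code>dir</code> and an
+		/// illustrative file name (Lucene conventionally uses the segment's .fnm file):
+		/// <code>
+		/// infos.Write(dir, "_1.fnm");
+		/// FieldInfos reloaded = new FieldInfos(dir, "_1.fnm"); // the reading constructor above
+		/// </code>
+		/// </summary>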
+               public void  Write(Directory d, System.String name)
+               {
+                       IndexOutput output = d.CreateOutput(name);
+                       try
+                       {
+                               Write(output);
+                       }
+                       finally
+                       {
+                               output.Close();
+                       }
+               }
+               
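+		/// <summary> Writes the format header (CURRENT_FORMAT), the field count, and
+		/// then, for each field, its name followed by a flag byte assembled from the
+		/// IS_INDEXED / STORE_* / OMIT_* bits.</summary>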
+               public void  Write(IndexOutput output)
+               {
+                       output.WriteVInt(CURRENT_FORMAT);
+                       output.WriteVInt(Size());
+                       for (int i = 0; i < Size(); i++)
+                       {
+                               FieldInfo fi = FieldInfo(i);
+                               byte bits = (byte) (0x0);
+                               if (fi.isIndexed)
+                                       bits |= IS_INDEXED;
+                               if (fi.storeTermVector)
+                                       bits |= STORE_TERMVECTOR;
+                               if (fi.storePositionWithTermVector)
+                                       bits |= STORE_POSITIONS_WITH_TERMVECTOR;
+                               if (fi.storeOffsetWithTermVector)
+                                       bits |= STORE_OFFSET_WITH_TERMVECTOR;
+                               if (fi.omitNorms)
+                                       bits |= OMIT_NORMS;
+                               if (fi.storePayloads)
+                                       bits |= STORE_PAYLOADS;
+                               if (fi.omitTermFreqAndPositions)
+                                       bits |= OMIT_TERM_FREQ_AND_POSITIONS;
+                               
+                               output.WriteString(fi.name);
+                               output.WriteByte(bits);
+                       }
+               }
+               
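+		/// <summary> Reads what Write produced: a negative first VInt is a format id,
+		/// while a non-negative one means the stream predates format headers
+		/// (FORMAT_PRE) and that first VInt is already the field count.</summary>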
+               private void  Read(IndexInput input, System.String fileName)
+               {
+                       int firstInt = input.ReadVInt();
+                       
+                       if (firstInt < 0)
+                       {
+                               // This is a real format
+                               format = firstInt;
+                       }
+                       else
+                       {
+                               format = FORMAT_PRE;
+                       }
+                       
+			if (format != FORMAT_PRE && format != FORMAT_START)
+                       {
+                               throw new CorruptIndexException("unrecognized format " + format + " in file \"" + fileName + "\"");
+                       }
+                       
+                       int size;
+                       if (format == FORMAT_PRE)
+                       {
+                               size = firstInt;
+                       }
+                       else
+                       {
+                               size = input.ReadVInt(); //read in the size
+                       }
+                       
+                       for (int i = 0; i < size; i++)
+                       {
+                               System.String name = StringHelper.Intern(input.ReadString());
+                               byte bits = input.ReadByte();
+                               bool isIndexed = (bits & IS_INDEXED) != 0;
+                               bool storeTermVector = (bits & STORE_TERMVECTOR) != 0;
+                               bool storePositionsWithTermVector = (bits & STORE_POSITIONS_WITH_TERMVECTOR) != 0;
+                               bool storeOffsetWithTermVector = (bits & STORE_OFFSET_WITH_TERMVECTOR) != 0;
+                               bool omitNorms = (bits & OMIT_NORMS) != 0;
+                               bool storePayloads = (bits & STORE_PAYLOADS) != 0;
+                               bool omitTermFreqAndPositions = (bits & OMIT_TERM_FREQ_AND_POSITIONS) != 0;
+                               
+                               AddInternal(name, isIndexed, storeTermVector, storePositionsWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
+                       }
+                       
+                       if (input.GetFilePointer() != input.Length())
+                       {
+                               throw new CorruptIndexException("did not read all bytes from file \"" + fileName + "\": read " + input.GetFilePointer() + " vs size " + input.Length());
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldInvertState.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldInvertState.cs
new file mode 100644 (file)
index 0000000..5b5ea45
--- /dev/null
@@ -0,0 +1,115 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using AttributeSource = Mono.Lucene.Net.Util.AttributeSource;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> This class tracks the number and position / offset parameters of terms
+       /// being added to the index. The information collected in this class is
+       /// also used to calculate the normalization factor for a field.
+       /// 
+       /// <p/><b>WARNING</b>: This API is new and experimental, and may suddenly
+       /// change.<p/>
+       /// </summary>
+       public sealed class FieldInvertState
+       {
+               internal int position;
+               internal int length;
+               internal int numOverlap;
+               internal int offset;
+               internal float boost;
+               internal AttributeSource attributeSource;
+               
+               public FieldInvertState()
+               {
+               }
+               
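+		/// <summary> Creates a state pre-initialized with the given position, length,
+		/// overlap count, end offset and boost.</summary>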
+               public FieldInvertState(int position, int length, int numOverlap, int offset, float boost)
+               {
+                       this.position = position;
+                       this.length = length;
+                       this.numOverlap = numOverlap;
+                       this.offset = offset;
+                       this.boost = boost;
+               }
+               
+               /// <summary> Re-initialize the state, using this boost value.</summary>
+               /// <param name="docBoost">boost value to use.
+               /// </param>
+               internal void  Reset(float docBoost)
+               {
+                       position = 0;
+                       length = 0;
+                       numOverlap = 0;
+                       offset = 0;
+                       boost = docBoost;
+                       attributeSource = null;
+               }
+               
+               /// <summary> Get the last processed term position.</summary>
+               /// <returns> the position
+               /// </returns>
+               public int GetPosition()
+               {
+                       return position;
+               }
+               
+               /// <summary> Get total number of terms in this field.</summary>
+               /// <returns> the length
+               /// </returns>
+               public int GetLength()
+               {
+                       return length;
+               }
+               
+               /// <summary> Get the number of terms with <code>positionIncrement == 0</code>.</summary>
+               /// <returns> the numOverlap
+               /// </returns>
+               public int GetNumOverlap()
+               {
+                       return numOverlap;
+               }
+               
+               /// <summary> Get end offset of the last processed term.</summary>
+               /// <returns> the offset
+               /// </returns>
+               public int GetOffset()
+               {
+                       return offset;
+               }
+               
+               /// <summary> Get boost value. This is the cumulative product of
+               /// document boost and field boost for all field instances
+               /// sharing the same field name.
+               /// </summary>
+               /// <returns> the boost
+               /// </returns>
+               public float GetBoost()
+               {
+                       return boost;
+               }
+               
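+		/// <summary> Returns the AttributeSource for the field being processed, or
+		/// null after Reset has cleared it.</summary>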
+               public AttributeSource GetAttributeSource()
+               {
+                       return attributeSource;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldReaderException.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldReaderException.cs
new file mode 100644 (file)
index 0000000..3f7571d
--- /dev/null
@@ -0,0 +1,90 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+	/// <summary> Thrown when an error occurs while reading a document field
+	/// from the index.
+	/// </summary>
+       [Serializable]
+       public class FieldReaderException:System.SystemException
+       {
+		/// <summary> Constructs a new exception with <code>null</code> as its
+		/// detail message and no inner exception.
+               /// </summary>
+               public FieldReaderException()
+               {
+               }
+               
+               /// <summary> Constructs a new runtime exception with the specified cause and a
+               /// detail message of <tt>(cause==null &#63; null : cause.toString())</tt>
+               /// (which typically contains the class and detail message of
+               /// <tt>cause</tt>).  
+               /// <p/>
+               /// This constructor is useful for runtime exceptions
+               /// that are little more than wrappers for other throwables.
+               /// 
+               /// </summary>
+               /// <param name="cause">the cause (which is saved for later retrieval by the
+               /// {@link #InnerException()} method).  (A <tt>null</tt> value is
+               /// permitted, and indicates that the cause is nonexistent or
+               /// unknown.)
+               /// </param>
+               public FieldReaderException(System.Exception cause):base((cause == null)?null:cause.Message, cause)
+               {
+               }
+               
+		/// <summary> Constructs a new exception with the specified detail message
+		/// and no inner exception.
+               /// 
+               /// </summary>
+               /// <param name="message">the detail message. The detail message is saved for
+               /// later retrieval by the {@link #getMessage()} method.
+               /// </param>
+               public FieldReaderException(System.String message):base(message)
+               {
+               }
+               
+               /// <summary> Constructs a new runtime exception with the specified detail message and
+               /// cause.  <p/>Note that the detail message associated with
+               /// <code>cause</code> is <i>not</i> automatically incorporated in
+               /// this runtime exception's detail message.
+               /// 
+               /// </summary>
+               /// <param name="message">the detail message (which is saved for later retrieval
+               /// by the {@link #getMessage()} method).
+               /// </param>
+               /// <param name="cause">  the cause (which is saved for later retrieval by the
+               /// {@link #InnerException()} method).  (A <tt>null</tt> value is
+               /// permitted, and indicates that the cause is nonexistent or
+               /// unknown.)
+               /// </param>
+               public FieldReaderException(System.String message, System.Exception cause):base(message, cause)
+               {
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldSortedTermVectorMapper.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldSortedTermVectorMapper.cs
new file mode 100644 (file)
index 0000000..3eed634
--- /dev/null
@@ -0,0 +1,76 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> For each Field, store a sorted collection of {@link TermVectorEntry}s
+       /// <p/>
+       /// This is not thread-safe.
+       /// </summary>
+       public class FieldSortedTermVectorMapper:TermVectorMapper
+       {
+               private System.Collections.IDictionary fieldToTerms = new System.Collections.Hashtable();
+               private System.Collections.Generic.SortedDictionary<object, object> currentSet;
+               private System.String currentField;
+               private System.Collections.Generic.IComparer<object> comparator;
+               
+		/// <summary> Creates a mapper that keeps term vector positions and offsets and sorts entries with the given comparator.</summary>
+               /// <param name="comparator">A Comparator for sorting {@link TermVectorEntry}s
+               /// </param>
+               public FieldSortedTermVectorMapper(System.Collections.Generic.IComparer<object> comparator):this(false, false, comparator)
+               {
+               }
+               
+               
+               public FieldSortedTermVectorMapper(bool ignoringPositions, bool ignoringOffsets, System.Collections.Generic.IComparer<object> comparator):base(ignoringPositions, ignoringOffsets)
+               {
+                       this.comparator = comparator;
+               }
+               
+               public override void  Map(System.String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions)
+               {
+                       TermVectorEntry entry = new TermVectorEntry(currentField, term, frequency, offsets, positions);
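+			// SortedDictionary stands in for Java's SortedSet here: the entry is
+			// used as both key and value, kept ordered by the supplied comparator.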
+                       currentSet.Add(entry, entry);
+               }
+               
+               public override void  SetExpectations(System.String field, int numTerms, bool storeOffsets, bool storePositions)
+               {
+                       currentSet = new System.Collections.Generic.SortedDictionary<object, object>(comparator);
+                       currentField = field;
+                       fieldToTerms[field] = currentSet;
+               }
+               
+               /// <summary> Get the mapping between fields and terms, sorted by the comparator
+               /// 
+               /// </summary>
+		/// <returns> A map between field names and per-field sorted collections; the entries are {@link TermVectorEntry}s
+               /// </returns>
+               public virtual System.Collections.IDictionary GetFieldToTerms()
+               {
+                       return fieldToTerms;
+               }
+               
+               
+               public virtual System.Collections.Generic.IComparer<object> GetComparator()
+               {
+                       return comparator;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldsReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldsReader.cs
new file mode 100644 (file)
index 0000000..3936e26
--- /dev/null
@@ -0,0 +1,777 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TokenStream = Mono.Lucene.Net.Analysis.TokenStream;
+using Mono.Lucene.Net.Documents;
+using AlreadyClosedException = Mono.Lucene.Net.Store.AlreadyClosedException;
+using BufferedIndexInput = Mono.Lucene.Net.Store.BufferedIndexInput;
+using Directory = Mono.Lucene.Net.Store.Directory;
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+using CloseableThreadLocal = Mono.Lucene.Net.Util.CloseableThreadLocal;
+using StringHelper = Mono.Lucene.Net.Util.StringHelper;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> Class responsible for access to stored document fields.
+       /// <p/>
+	/// It uses &lt;segment&gt;.fdt and &lt;segment&gt;.fdx files.
+       /// 
+       /// </summary>
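+	/// <remarks> A hedged read sketch; <code>dir</code>, <code>fieldInfos</code>,
+	/// the segment name "_0" and the field name "title" are illustrative
+	/// assumptions, not values mandated by this class:
+	/// <code>
+	/// FieldsReader reader = new FieldsReader(dir, "_0", fieldInfos);
+	/// try
+	/// {
+	///     Document doc = reader.Doc(0, null); // a null selector loads every field
+	///     System.Console.WriteLine(doc.Get("title"));
+	/// }
+	/// finally
+	/// {
+	///     reader.Close();
+	/// }
+	/// </code>
+	/// </remarks>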
+       /// <version>  $Id: FieldsReader.java 801344 2009-08-05 18:05:06Z yonik $
+       /// </version>
+       public sealed class FieldsReader : System.ICloneable
+       {
+               private FieldInfos fieldInfos;
+               
+               // The main fieldStream, used only for cloning.
+               private IndexInput cloneableFieldsStream;
+               
+               // This is a clone of cloneableFieldsStream used for reading documents.
+               // It should not be cloned outside of a synchronized context.
+               private IndexInput fieldsStream;
+               
+               private IndexInput cloneableIndexStream;
+               private IndexInput indexStream;
+               private int numTotalDocs;
+               private int size;
+               private bool closed;
+               private int format;
+               private int formatSize;
+               
+               // The docID offset where our docs begin in the index
+               // file.  This will be 0 if we have our own private file.
+               private int docStoreOffset;
+               
+               private CloseableThreadLocal fieldsStreamTL = new CloseableThreadLocal();
+               private bool isOriginal = false;
+               
+               /// <summary>Returns a cloned FieldsReader that shares open
+               /// IndexInputs with the original one.  It is the caller's
+               /// job not to close the original FieldsReader until all
+		/// clones are closed (eg, currently SegmentReader manages
+               /// this logic). 
+               /// </summary>
+               public System.Object Clone()
+               {
+                       EnsureOpen();
+                       return new FieldsReader(fieldInfos, numTotalDocs, size, format, formatSize, docStoreOffset, cloneableFieldsStream, cloneableIndexStream);
+               }
+               
+               // Used only by clone
+               private FieldsReader(FieldInfos fieldInfos, int numTotalDocs, int size, int format, int formatSize, int docStoreOffset, IndexInput cloneableFieldsStream, IndexInput cloneableIndexStream)
+               {
+                       this.fieldInfos = fieldInfos;
+                       this.numTotalDocs = numTotalDocs;
+                       this.size = size;
+                       this.format = format;
+                       this.formatSize = formatSize;
+                       this.docStoreOffset = docStoreOffset;
+                       this.cloneableFieldsStream = cloneableFieldsStream;
+                       this.cloneableIndexStream = cloneableIndexStream;
+                       fieldsStream = (IndexInput) cloneableFieldsStream.Clone();
+                       indexStream = (IndexInput) cloneableIndexStream.Clone();
+               }
+               
+               public /*internal*/ FieldsReader(Directory d, System.String segment, FieldInfos fn):this(d, segment, fn, BufferedIndexInput.BUFFER_SIZE, - 1, 0)
+               {
+               }
+               
+               internal FieldsReader(Directory d, System.String segment, FieldInfos fn, int readBufferSize):this(d, segment, fn, readBufferSize, - 1, 0)
+               {
+               }
+               
+               internal FieldsReader(Directory d, System.String segment, FieldInfos fn, int readBufferSize, int docStoreOffset, int size)
+               {
+                       bool success = false;
+                       isOriginal = true;
+                       try
+                       {
+                               fieldInfos = fn;
+                               
+                               cloneableFieldsStream = d.OpenInput(segment + "." + IndexFileNames.FIELDS_EXTENSION, readBufferSize);
+                               cloneableIndexStream = d.OpenInput(segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION, readBufferSize);
+                               
+                               // First version of fdx did not include a format
+                               // header, but, the first int will always be 0 in that
+                               // case
+                               int firstInt = cloneableIndexStream.ReadInt();
+                               if (firstInt == 0)
+                                       format = 0;
+                               else
+                                       format = firstInt;
+                               
+                               if (format > FieldsWriter.FORMAT_CURRENT
+                    /* extra support for Lucene 3.0 indexes: */ && format != FieldsWriter.FORMAT_LUCENE_3_0_NO_COMPRESSED_FIELDS
+                    )
+                                       throw new CorruptIndexException("Incompatible format version: " + format + " expected " + FieldsWriter.FORMAT_CURRENT + " or lower");
+                               
+                               if (format > FieldsWriter.FORMAT)
+                                       formatSize = 4;
+                               else
+                                       formatSize = 0;
+                               
+                               if (format < FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
+                                       cloneableFieldsStream.SetModifiedUTF8StringsMode();
+                               
+                               fieldsStream = (IndexInput) cloneableFieldsStream.Clone();
+                               
+                               long indexSize = cloneableIndexStream.Length() - formatSize;
+                               
+                               if (docStoreOffset != - 1)
+                               {
+                                       // We read only a slice out of this shared fields file
+                                       this.docStoreOffset = docStoreOffset;
+                                       this.size = size;
+                                       
+                                       // Verify the file is long enough to hold all of our
+                                       // docs
+                                       System.Diagnostics.Debug.Assert(((int)(indexSize / 8)) >= size + this.docStoreOffset, "indexSize=" + indexSize + " size=" + size + " docStoreOffset=" + docStoreOffset);
+                               }
+                               else
+                               {
+                                       this.docStoreOffset = 0;
+                                       this.size = (int) (indexSize >> 3);
+                               }
+                               
+                               indexStream = (IndexInput) cloneableIndexStream.Clone();
+                               numTotalDocs = (int) (indexSize >> 3);
+                               success = true;
+                       }
+                       finally
+                       {
+                               // With lock-less commits, it's entirely possible (and
+                               // fine) to hit a FileNotFound exception above. In
+                               // this case, we want to explicitly close any subset
+                               // of things that were opened so that we don't have to
+                               // wait for a GC to do so.
+                               if (!success)
+                               {
+                                       Close();
+                               }
+                       }
+               }
+               
+               /// <throws>  AlreadyClosedException if this FieldsReader is closed </throws>
+               internal void  EnsureOpen()
+               {
+                       if (closed)
+                       {
+                               throw new AlreadyClosedException("this FieldsReader is closed");
+                       }
+               }
+               
+		/// <summary> Closes the underlying {@link Mono.Lucene.Net.Store.IndexInput} streams, including any that are associated with a
+		/// lazy implementation of a Field.  This means that lazy field values will no longer be accessible.
+               /// 
+               /// </summary>
+               /// <throws>  IOException </throws>
+               public /*internal*/ void  Close()
+               {
+                       if (!closed)
+                       {
+                               if (fieldsStream != null)
+                               {
+                                       fieldsStream.Close();
+                               }
+                               if (isOriginal)
+                               {
+                                       if (cloneableFieldsStream != null)
+                                       {
+                                               cloneableFieldsStream.Close();
+                                       }
+                                       if (cloneableIndexStream != null)
+                                       {
+                                               cloneableIndexStream.Close();
+                                       }
+                               }
+                               if (indexStream != null)
+                               {
+                                       indexStream.Close();
+                               }
+                               fieldsStreamTL.Close();
+                               closed = true;
+                       }
+               }
+               
+               public /*internal*/ int Size()
+               {
+                       return size;
+               }
+               
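+		/// <summary> Positions indexStream at the entry for docID: each index entry
+		/// is an 8-byte pointer into the fields data file, stored after the
+		/// formatSize header bytes.</summary>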
+               private void  SeekIndex(int docID)
+               {
+                       indexStream.Seek(formatSize + (docID + docStoreOffset) * 8L);
+               }
+               
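+		/// <summary> Returns true when the fields file stores string lengths as UTF-8
+		/// byte counts (FORMAT_VERSION_UTF8_LENGTH_IN_BYTES or later), which is what
+		/// allows documents to be copied as raw bytes.</summary>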
+               internal bool CanReadRawDocs()
+               {
+                       return format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES;
+               }
+               
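+		/// <summary> Loads the stored fields of document <code>n</code>. The
+		/// <code>fieldSelector</code> decides per field whether to load it eagerly,
+		/// lazily, as a size-only stub, or skip it; a null selector loads every
+		/// field.</summary>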
+               public /*internal*/ Document Doc(int n, FieldSelector fieldSelector)
+               {
+                       SeekIndex(n);
+                       long position = indexStream.ReadLong();
+                       fieldsStream.Seek(position);
+                       
+                       Document doc = new Document();
+                       int numFields = fieldsStream.ReadVInt();
+                       for (int i = 0; i < numFields; i++)
+                       {
+                               int fieldNumber = fieldsStream.ReadVInt();
+                               FieldInfo fi = fieldInfos.FieldInfo(fieldNumber);
+                               FieldSelectorResult acceptField = fieldSelector == null?FieldSelectorResult.LOAD:fieldSelector.Accept(fi.name);
+                               
+                               byte bits = fieldsStream.ReadByte();
+                               System.Diagnostics.Debug.Assert(bits <= FieldsWriter.FIELD_IS_COMPRESSED + FieldsWriter.FIELD_IS_TOKENIZED + FieldsWriter.FIELD_IS_BINARY);
+                               
+                               bool compressed = (bits & FieldsWriter.FIELD_IS_COMPRESSED) != 0;
+                               bool tokenize = (bits & FieldsWriter.FIELD_IS_TOKENIZED) != 0;
+                               bool binary = (bits & FieldsWriter.FIELD_IS_BINARY) != 0;
+                               //TODO: Find an alternative approach here if this list continues to grow beyond the
+                               //list of 5 or 6 currently here.  See Lucene 762 for discussion
+                               if (acceptField.Equals(FieldSelectorResult.LOAD))
+                               {
+                                       AddField(doc, fi, binary, compressed, tokenize);
+                               }
+                               else if (acceptField.Equals(FieldSelectorResult.LOAD_FOR_MERGE))
+                               {
+                                       AddFieldForMerge(doc, fi, binary, compressed, tokenize);
+                               }
+                               else if (acceptField.Equals(FieldSelectorResult.LOAD_AND_BREAK))
+                               {
+                                       AddField(doc, fi, binary, compressed, tokenize);
+                                       break; //Get out of this loop
+                               }
+                               else if (acceptField.Equals(FieldSelectorResult.LAZY_LOAD))
+                               {
+                                       AddFieldLazy(doc, fi, binary, compressed, tokenize);
+                               }
+                               else if (acceptField.Equals(FieldSelectorResult.SIZE))
+                               {
+                                       SkipField(binary, compressed, AddFieldSize(doc, fi, binary, compressed));
+                               }
+                               else if (acceptField.Equals(FieldSelectorResult.SIZE_AND_BREAK))
+                               {
+                                       AddFieldSize(doc, fi, binary, compressed);
+                                       break;
+                               }
+                               else
+                               {
+                                       SkipField(binary, compressed);
+                               }
+                       }
+                       
+                       return doc;
+               }
+               
+               /// <summary>Returns the length in bytes of each raw document in a
+               /// contiguous range of length numDocs starting with
+               /// startDocID.  Returns the IndexInput (the fieldStream),
+               /// already seeked to the starting point for startDocID.
+               /// </summary>
+               internal IndexInput RawDocs(int[] lengths, int startDocID, int numDocs)
+               {
+                       SeekIndex(startDocID);
+                       long startOffset = indexStream.ReadLong();
+                       long lastOffset = startOffset;
+                       int count = 0;
+                       while (count < numDocs)
+                       {
+                               long offset;
+                               int docID = docStoreOffset + startDocID + count + 1;
+                               System.Diagnostics.Debug.Assert(docID <= numTotalDocs);
+                               if (docID < numTotalDocs)
+                                       offset = indexStream.ReadLong();
+                               else
+                                       offset = fieldsStream.Length();
+                               lengths[count++] = (int) (offset - lastOffset);
+                               lastOffset = offset;
+                       }
+                       
+                       fieldsStream.Seek(startOffset);
+                       
+                       return fieldsStream;
+               }
+               
+               /// <summary> Skip the field.  We still have to read some of the information about the field, but can skip past the actual content.
+               /// This will have the most payoff on large fields.
+               /// </summary>
+               private void  SkipField(bool binary, bool compressed)
+               {
+                       SkipField(binary, compressed, fieldsStream.ReadVInt());
+               }
+               
+               private void  SkipField(bool binary, bool compressed, int toRead)
+               {
+                       if (format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES || binary || compressed)
+                       {
+                               fieldsStream.Seek(fieldsStream.GetFilePointer() + toRead);
+                       }
+                       else
+                       {
+				// We need to skip chars.  This will slow us down, but is still better than reading the content
+                               fieldsStream.SkipChars(toRead);
+                       }
+               }
+               
+               private void  AddFieldLazy(Document doc, FieldInfo fi, bool binary, bool compressed, bool tokenize)
+               {
+                       if (binary)
+                       {
+                               int toRead = fieldsStream.ReadVInt();
+                               long pointer = fieldsStream.GetFilePointer();
+                               if (compressed)
+                               {
+                                       //was: doc.add(new Fieldable(fi.name, uncompress(b), Fieldable.Store.COMPRESS));
+                                       doc.Add(new LazyField(this, fi.name, Field.Store.COMPRESS, toRead, pointer, binary));
+                               }
+                               else
+                               {
+                                       //was: doc.add(new Fieldable(fi.name, b, Fieldable.Store.YES));
+                                       doc.Add(new LazyField(this, fi.name, Field.Store.YES, toRead, pointer, binary));
+                               }
+                               //Need to move the pointer ahead by toRead positions
+                               fieldsStream.Seek(pointer + toRead);
+                       }
+                       else
+                       {
+                               Field.Store store = Field.Store.YES;
+                               Field.Index index = GetIndexType(fi, tokenize);
+                               Field.TermVector termVector = GetTermVectorType(fi);
+                               
+                               AbstractField f;
+                               if (compressed)
+                               {
+                                       store = Field.Store.COMPRESS;
+                                       int toRead = fieldsStream.ReadVInt();
+                                       long pointer = fieldsStream.GetFilePointer();
+                                       f = new LazyField(this, fi.name, store, toRead, pointer, binary);
+                                       //skip over the part that we aren't loading
+                                       fieldsStream.Seek(pointer + toRead);
+                                       f.SetOmitNorms(fi.omitNorms);
+                                       f.SetOmitTermFreqAndPositions(fi.omitTermFreqAndPositions);
+                               }
+                               else
+                               {
+                                       int length = fieldsStream.ReadVInt();
+                                       long pointer = fieldsStream.GetFilePointer();
+                                       //Skip ahead of where we are by the length of what is stored
+                                       if (format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
+                                               fieldsStream.Seek(pointer + length);
+                                       else
+                                               fieldsStream.SkipChars(length);
+                                       f = new LazyField(this, fi.name, store, index, termVector, length, pointer, binary);
+                                       f.SetOmitNorms(fi.omitNorms);
+                                       f.SetOmitTermFreqAndPositions(fi.omitTermFreqAndPositions);
+                               }
+                               doc.Add(f);
+                       }
+               }
+               
+               // in merge mode we don't uncompress the data of a compressed field
+               private void  AddFieldForMerge(Document doc, FieldInfo fi, bool binary, bool compressed, bool tokenize)
+               {
+                       System.Object data;
+                       
+                       if (binary || compressed)
+                       {
+                               int toRead = fieldsStream.ReadVInt();
+                               byte[] b = new byte[toRead];
+                               fieldsStream.ReadBytes(b, 0, b.Length);
+                               data = b;
+                       }
+                       else
+                       {
+                               data = fieldsStream.ReadString();
+                       }
+                       
+                       doc.Add(new FieldForMerge(data, fi, binary, compressed, tokenize));
+               }
+               
+               private void  AddField(Document doc, FieldInfo fi, bool binary, bool compressed, bool tokenize)
+               {
+                       
+                       //we have a binary stored field, and it may be compressed
+                       if (binary)
+                       {
+                               int toRead = fieldsStream.ReadVInt();
+                               byte[] b = new byte[toRead];
+                               fieldsStream.ReadBytes(b, 0, b.Length);
+                               if (compressed)
+                                       doc.Add(new Field(fi.name, Uncompress(b), Field.Store.COMPRESS));
+                               else
+                                       doc.Add(new Field(fi.name, b, Field.Store.YES));
+                       }
+                       else
+                       {
+                               Field.Store store = Field.Store.YES;
+                               Field.Index index = GetIndexType(fi, tokenize);
+                               Field.TermVector termVector = GetTermVectorType(fi);
+                               
+                               AbstractField f;
+                               if (compressed)
+                               {
+                                       store = Field.Store.COMPRESS;
+                                       int toRead = fieldsStream.ReadVInt();
+                                       
+                                       byte[] b = new byte[toRead];
+                                       fieldsStream.ReadBytes(b, 0, b.Length);
+                                       f = new Field(fi.name, false, System.Text.Encoding.GetEncoding("UTF-8").GetString(Uncompress(b)), store, index, termVector);
+                                       f.SetOmitTermFreqAndPositions(fi.omitTermFreqAndPositions);
+                                       f.SetOmitNorms(fi.omitNorms);
+                               }
+                               else
+                               {
+                                       f = new Field(fi.name, false, fieldsStream.ReadString(), store, index, termVector);
+                                       f.SetOmitTermFreqAndPositions(fi.omitTermFreqAndPositions);
+                                       f.SetOmitNorms(fi.omitNorms);
+                               }
+                               doc.Add(f);
+                       }
+               }
+               
+               // Add the size of field as a byte[] containing the 4 bytes of the integer byte size (high order byte first; char = 2 bytes)
+               // Read just the size -- caller must skip the field content to continue reading fields
+               // Return the size in bytes or chars, depending on field type
+               private int AddFieldSize(Document doc, FieldInfo fi, bool binary, bool compressed)
+               {
+                       int size = fieldsStream.ReadVInt(), bytesize = binary || compressed?size:2 * size;
+                       byte[] sizebytes = new byte[4];
+                       sizebytes[0] = (byte) (SupportClass.Number.URShift(bytesize, 24));
+                       sizebytes[1] = (byte) (SupportClass.Number.URShift(bytesize, 16));
+                       sizebytes[2] = (byte) (SupportClass.Number.URShift(bytesize, 8));
+                       sizebytes[3] = (byte) bytesize;
+                       doc.Add(new Field(fi.name, sizebytes, Field.Store.YES));
+                       return size;
+               }
+               
+               private Field.TermVector GetTermVectorType(FieldInfo fi)
+               {
+                       Field.TermVector termVector = null;
+                       if (fi.storeTermVector)
+                       {
+                               if (fi.storeOffsetWithTermVector)
+                               {
+                                       if (fi.storePositionWithTermVector)
+                                       {
+                                               termVector = Field.TermVector.WITH_POSITIONS_OFFSETS;
+                                       }
+                                       else
+                                       {
+                                               termVector = Field.TermVector.WITH_OFFSETS;
+                                       }
+                               }
+                               else if (fi.storePositionWithTermVector)
+                               {
+                                       termVector = Field.TermVector.WITH_POSITIONS;
+                               }
+                               else
+                               {
+                                       termVector = Field.TermVector.YES;
+                               }
+                       }
+                       else
+                       {
+                               termVector = Field.TermVector.NO;
+                       }
+                       return termVector;
+               }
+               
+               private Field.Index GetIndexType(FieldInfo fi, bool tokenize)
+               {
+                       Field.Index index;
+                       if (fi.isIndexed && tokenize)
+                               index = Field.Index.ANALYZED;
+                       else if (fi.isIndexed && !tokenize)
+                               index = Field.Index.NOT_ANALYZED;
+                       else
+                               index = Field.Index.NO;
+                       return index;
+               }
+               
+               /// <summary> A lazy implementation of Fieldable that defers loading of fields until they
+               /// are asked for, instead of loading them when the Document is loaded.
+               /// </summary>
+               [Serializable]
+               private class LazyField:AbstractField, Fieldable
+               {
+                       private void  InitBlock(FieldsReader enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private FieldsReader enclosingInstance;
+                       public FieldsReader Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private int toRead;
+                       private long pointer;
+                       
+                       public LazyField(FieldsReader enclosingInstance, System.String name, Field.Store store, int toRead, long pointer, bool isBinary):base(name, store, Field.Index.NO, Field.TermVector.NO)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.toRead = toRead;
+                               this.pointer = pointer;
+                               this.isBinary = isBinary;
+                               if (isBinary)
+                                       binaryLength = toRead;
+                               lazy = true;
+                       }
+                       
+                       public LazyField(FieldsReader enclosingInstance, System.String name, Field.Store store, Field.Index index, Field.TermVector termVector, int toRead, long pointer, bool isBinary):base(name, store, index, termVector)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.toRead = toRead;
+                               this.pointer = pointer;
+                               this.isBinary = isBinary;
+                               if (isBinary)
+                                       binaryLength = toRead;
+                               lazy = true;
+                       }
+                       
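+                       // Lazily clones the shared fields stream into a per-thread IndexInput (via the
+                       // fieldsStreamTL thread-local) so that concurrent lazy loads do not contend on a
+                       // single stream's file position.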
+                       private IndexInput GetFieldStream()
+                       {
+                               IndexInput localFieldsStream = (IndexInput) Enclosing_Instance.fieldsStreamTL.Get();
+                               if (localFieldsStream == null)
+                               {
+                                       localFieldsStream = (IndexInput) Enclosing_Instance.cloneableFieldsStream.Clone();
+                                       Enclosing_Instance.fieldsStreamTL.Set(localFieldsStream);
+                               }
+                               return localFieldsStream;
+                       }
+                       
+                       /// <summary>The value of the field in Binary, or null.  If null, the Reader value,
+                       /// String value, or TokenStream value is used. Exactly one of stringValue(), 
+                       /// readerValue(), binaryValue(), and tokenStreamValue() must be set. 
+                       /// </summary>
+                       public override byte[] BinaryValue()
+                       {
+                               return GetBinaryValue(null);
+                       }
+                       
+                       /// <summary>The value of the field as a Reader, or null.  If null, the String value,
+                       /// binary value, or TokenStream value is used.  Exactly one of stringValue(), 
+                       /// readerValue(), binaryValue(), and tokenStreamValue() must be set. 
+                       /// </summary>
+                       public override System.IO.TextReader ReaderValue()
+                       {
+                               Enclosing_Instance.EnsureOpen();
+                               return null;
+                       }
+                       
+                       /// <summary>The value of the field as a TokenStream, or null.  If null, the Reader value,
+                       /// String value, or binary value is used. Exactly one of stringValue(), 
+                       /// readerValue(), binaryValue(), and tokenStreamValue() must be set. 
+                       /// </summary>
+                       public override TokenStream TokenStreamValue()
+                       {
+                               Enclosing_Instance.EnsureOpen();
+                               return null;
+                       }
+                       
+                       /// <summary>The value of the field as a String, or null.  If null, the Reader value,
+                       /// binary value, or TokenStream value is used.  Exactly one of stringValue(), 
+                       /// readerValue(), binaryValue(), and tokenStreamValue() must be set. 
+                       /// </summary>
+                       public override System.String StringValue()
+                       {
+                               Enclosing_Instance.EnsureOpen();
+                               if (isBinary)
+                                       return null;
+                               else
+                               {
+                                       if (fieldsData == null)
+                                       {
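+                                               // First access: read the string from the fields stream and cache it
+                                               // in fieldsData so later calls return it without touching the stream.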
+                                               IndexInput localFieldsStream = GetFieldStream();
+                                               try
+                                               {
+                                                       localFieldsStream.Seek(pointer);
+                                                       if (isCompressed)
+                                                       {
+                                                               byte[] b = new byte[toRead];
+                                                               localFieldsStream.ReadBytes(b, 0, b.Length);
+                                                               fieldsData = System.Text.Encoding.GetEncoding("UTF-8").GetString(Enclosing_Instance.Uncompress(b));
+                                                       }
+                                                       else
+                                                       {
+                                                               if (Enclosing_Instance.format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
+                                                               {
+                                                                       byte[] bytes = new byte[toRead];
+                                                                       localFieldsStream.ReadBytes(bytes, 0, toRead);
+                                                                       fieldsData = System.Text.Encoding.GetEncoding("UTF-8").GetString(bytes);
+                                                               }
+                                                               else
+                                                               {
+                                                                       // Read chars directly since we already know the length we need to read
+                                                                       char[] chars = new char[toRead];
+                                                                       localFieldsStream.ReadChars(chars, 0, toRead);
+                                                                       fieldsData = new System.String(chars);
+                                                               }
+                                                       }
+                                               }
+                                               catch (System.IO.IOException e)
+                                               {
+                                                       throw new FieldReaderException(e);
+                                               }
+                                       }
+                                       return (System.String) fieldsData;
+                               }
+                       }
+                       
+                       public long GetPointer()
+                       {
+                               Enclosing_Instance.EnsureOpen();
+                               return pointer;
+                       }
+                       
+                       public void  SetPointer(long pointer)
+                       {
+                               Enclosing_Instance.EnsureOpen();
+                               this.pointer = pointer;
+                       }
+                       
+                       public int GetToRead()
+                       {
+                               Enclosing_Instance.EnsureOpen();
+                               return toRead;
+                       }
+                       
+                       public void  SetToRead(int toRead)
+                       {
+                               Enclosing_Instance.EnsureOpen();
+                               this.toRead = toRead;
+                       }
+                       
+                       public override byte[] GetBinaryValue(byte[] result)
+                       {
+                               Enclosing_Instance.EnsureOpen();
+                               
+                               if (isBinary)
+                               {
+                                       if (fieldsData == null)
+                                       {
+                                               // Allocate new buffer if result is null or too small
+                                               byte[] b;
+                                               if (result == null || result.Length < toRead)
+                                                       b = new byte[toRead];
+                                               else
+                                                       b = result;
+                                               
+                                               IndexInput localFieldsStream = GetFieldStream();
+                                               
+                                               // Wrap any IOException in a FieldReaderException; IndexReader.document already
+                                               // throws on I/O errors, so callers are handling that case when fetching a document anyway
+                                               try
+                                               {
+                                                       localFieldsStream.Seek(pointer);
+                                                       localFieldsStream.ReadBytes(b, 0, toRead);
+                                                       if (isCompressed)
+                                                       {
+                                                               fieldsData = Enclosing_Instance.Uncompress(b);
+                                                       }
+                                                       else
+                                                       {
+                                                               fieldsData = b;
+                                                       }
+                                               }
+                                               catch (System.IO.IOException e)
+                                               {
+                                                       throw new FieldReaderException(e);
+                                               }
+                                               
+                                               binaryOffset = 0;
+                                               binaryLength = toRead;
+                                       }
+                                       
+                                       return (byte[]) fieldsData;
+                               }
+                               else
+                                       return null;
+                       }
+               }
+               
+               private byte[] Uncompress(byte[] b)
+               {
+                       try
+                       {
+                               return CompressionTools.Decompress(b);
+                       }
+                       catch (Exception e)
+                       {
+                               // This happens if the field was not actually compressed
+                               throw new CorruptIndexException("field data is in the wrong format: " + e.ToString(), e);
+                       }
+               }
+               
+               // Instances of this class hold field properties and data
+               // for merge
+               [Serializable]
+               internal sealed class FieldForMerge:AbstractField
+               {
+                       public override System.String StringValue()
+                       {
+                               return (System.String) this.fieldsData;
+                       }
+                       
+                       public override System.IO.TextReader ReaderValue()
+                       {
+                               // not needed for merge
+                               return null;
+                       }
+                       
+                       public override byte[] BinaryValue()
+                       {
+                               return (byte[]) this.fieldsData;
+                       }
+                       
+                       public override TokenStream TokenStreamValue()
+                       {
+                               // not needed for merge
+                               return null;
+                       }
+                       
+                       public FieldForMerge(System.Object value_Renamed, FieldInfo fi, bool binary, bool compressed, bool tokenize)
+                       {
+                               this.isStored = true;
+                               this.fieldsData = value_Renamed;
+                               this.isCompressed = compressed;
+                               this.isBinary = binary;
+                               if (binary)
+                                       binaryLength = ((byte[]) value_Renamed).Length;
+                               
+                               this.isTokenized = tokenize;
+                               
+                               this.name = StringHelper.Intern(fi.name);
+                               this.isIndexed = fi.isIndexed;
+                               this.omitNorms = fi.omitNorms;
+                               this.omitTermFreqAndPositions = fi.omitTermFreqAndPositions;
+                               this.storeOffsetWithTermVector = fi.storeOffsetWithTermVector;
+                               this.storePositionWithTermVector = fi.storePositionWithTermVector;
+                               this.storeTermVector = fi.storeTermVector;
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldsWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FieldsWriter.cs
new file mode 100644 (file)
index 0000000..2d63a51
--- /dev/null
@@ -0,0 +1,347 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using CompressionTools = Mono.Lucene.Net.Documents.CompressionTools;
+using Document = Mono.Lucene.Net.Documents.Document;
+using Fieldable = Mono.Lucene.Net.Documents.Fieldable;
+using Directory = Mono.Lucene.Net.Store.Directory;
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+using RAMOutputStream = Mono.Lucene.Net.Store.RAMOutputStream;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class FieldsWriter
+       {
+               internal const byte FIELD_IS_TOKENIZED = (byte) (0x1);
+               internal const byte FIELD_IS_BINARY = (byte) (0x2);
+               internal const byte FIELD_IS_COMPRESSED = (byte) (0x4);
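+               // These flags are OR-ed into the single status byte written per stored field; e.g. a
+               // tokenized binary field is written with bits == (FIELD_IS_TOKENIZED | FIELD_IS_BINARY) == 0x3.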
+               
+               // Original format
+               internal const int FORMAT = 0;
+               
+               // Changed strings to UTF8
+               internal const int FORMAT_VERSION_UTF8_LENGTH_IN_BYTES = 1;
+
+               // Lucene 3.0: Removal of compressed fields. This constant exists only for compatibility
+               // with 3.0-created indexes: new segments always use FORMAT_CURRENT. As the index format
+               // did not change in 3.0, stored field files that no longer support compression are merely
+               // marked as such to optimize merging, and 2.9 can still read them.
+               internal static int FORMAT_LUCENE_3_0_NO_COMPRESSED_FIELDS = 2;
+               
+               // NOTE: if you introduce a new format, make it 1 higher
+               // than the current one, and always change this if you
+               // switch to a new format!
+               internal static readonly int FORMAT_CURRENT = FORMAT_VERSION_UTF8_LENGTH_IN_BYTES;
+               
+               private FieldInfos fieldInfos;
+               
+               private IndexOutput fieldsStream;
+               
+               private IndexOutput indexStream;
+               
+               private bool doClose;
+               
+               internal FieldsWriter(Directory d, System.String segment, FieldInfos fn)
+               {
+                       fieldInfos = fn;
+                       
+                       bool success = false;
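+                       // Two-phase create with rollback: if creating or writing either file fails, close
+                       // whatever was opened and delete the partially written files so no orphans remain.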
+                       System.String fieldsName = segment + "." + IndexFileNames.FIELDS_EXTENSION;
+                       try
+                       {
+                               fieldsStream = d.CreateOutput(fieldsName);
+                               fieldsStream.WriteInt(FORMAT_CURRENT);
+                               success = true;
+                       }
+                       finally
+                       {
+                               if (!success)
+                               {
+                                       try
+                                       {
+                                               Close();
+                                       }
+                                       catch (System.Exception)
+                                       {
+                                               // Suppress so we keep throwing the original exception
+                                       }
+                                       try
+                                       {
+                                               d.DeleteFile(fieldsName);
+                                       }
+                                       catch (System.Exception)
+                                       {
+                                               // Suppress so we keep throwing the original exception
+                                       }
+                               }
+                       }
+                       
+                       success = false;
+                       System.String indexName = segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION;
+                       try
+                       {
+                               indexStream = d.CreateOutput(indexName);
+                               indexStream.WriteInt(FORMAT_CURRENT);
+                               success = true;
+                       }
+                       finally
+                       {
+                               if (!success)
+                               {
+                                       try
+                                       {
+                                               Close();
+                                       }
+                                       catch (System.IO.IOException)
+                                       {
+                                               // Suppress so we keep throwing the original exception
+                                       }
+                                       try
+                                       {
+                                               d.DeleteFile(fieldsName);
+                                       }
+                                       catch (System.Exception)
+                                       {
+                                               // Suppress so we keep throwing the original exception
+                                       }
+                                       try
+                                       {
+                                               d.DeleteFile(indexName);
+                                       }
+                                       catch (System.Exception)
+                                       {
+                                               // Suppress so we keep throwing the original exception
+                                       }
+                               }
+                       }
+                       
+                       doClose = true;
+               }
+               
+               internal FieldsWriter(IndexOutput fdx, IndexOutput fdt, FieldInfos fn)
+               {
+                       fieldInfos = fn;
+                       fieldsStream = fdt;
+                       indexStream = fdx;
+                       doClose = false;
+               }
+               
+               internal void  SetFieldsStream(IndexOutput stream)
+               {
+                       this.fieldsStream = stream;
+               }
+               
+               // Writes the contents of buffer into the fields stream
+               // and adds a new entry for this document into the index
+               // stream.  This assumes the buffer was already written
+               // in the correct fields format.
+               internal void  FlushDocument(int numStoredFields, RAMOutputStream buffer)
+               {
+                       indexStream.WriteLong(fieldsStream.GetFilePointer());
+                       fieldsStream.WriteVInt(numStoredFields);
+                       buffer.WriteTo(fieldsStream);
+               }
+               
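+               // Records an index entry pointing at a document with zero stored fields, keeping the
+               // fields index aligned with docIDs without writing any field data.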
+               internal void  SkipDocument()
+               {
+                       indexStream.WriteLong(fieldsStream.GetFilePointer());
+                       fieldsStream.WriteVInt(0);
+               }
+               
+               internal void  Flush()
+               {
+                       indexStream.Flush();
+                       fieldsStream.Flush();
+               }
+               
+               internal void  Close()
+               {
+                       if (doClose)
+                       {
+                               
+                               try
+                               {
+                                       if (fieldsStream != null)
+                                       {
+                                               try
+                                               {
+                                                       fieldsStream.Close();
+                                               }
+                                               finally
+                                               {
+                                                       fieldsStream = null;
+                                               }
+                                       }
+                               }
+                               catch (System.IO.IOException ioe)
+                               {
+                                       try
+                                       {
+                                               if (indexStream != null)
+                                               {
+                                                       try
+                                                       {
+                                                               indexStream.Close();
+                                                       }
+                                                       finally
+                                                       {
+                                                               indexStream = null;
+                                                       }
+                                               }
+                                       }
+                                       catch (System.IO.IOException)
+                                       {
+                                               // Ignore so we throw only first IOException hit
+                                       }
+                                       throw ioe;
+                               }
+                               finally
+                               {
+                                       if (indexStream != null)
+                                       {
+                                               try
+                                               {
+                                                       indexStream.Close();
+                                               }
+                                               finally
+                                               {
+                                                       indexStream = null;
+                                               }
+                                       }
+                               }
+                       }
+               }
+               
+               internal void  WriteField(FieldInfo fi, Fieldable field)
+               {
+                       // If the field is an instance of FieldsReader.FieldForMerge, we are in merge mode and
+                       // field.GetBinaryValue() already returns the compressed value for a field with
+                       // IsCompressed()==true, so we disable compression in that case
+                       bool disableCompression = (field is FieldsReader.FieldForMerge);
+                       fieldsStream.WriteVInt(fi.number);
+                       byte bits = 0;
+                       if (field.IsTokenized())
+                               bits |= FieldsWriter.FIELD_IS_TOKENIZED;
+                       if (field.IsBinary())
+                               bits |= FieldsWriter.FIELD_IS_BINARY;
+                       if (field.IsCompressed())
+                               bits |= FieldsWriter.FIELD_IS_COMPRESSED;
+                       
+                       fieldsStream.WriteByte(bits);
+                       
+                       if (field.IsCompressed())
+                       {
+                               // compression is enabled for the current field
+                               byte[] data;
+                               int len;
+                               int offset;
+                               if (disableCompression)
+                               {
+                                       // optimized case for merging, the data
+                                       // is already compressed
+                                       data = field.GetBinaryValue();
+                                       System.Diagnostics.Debug.Assert(data != null);
+                                       len = field.GetBinaryLength();
+                                       offset = field.GetBinaryOffset();
+                               }
+                               else
+                               {
+                                       // check if it is a binary field
+                                       if (field.IsBinary())
+                                       {
+                                               data = CompressionTools.Compress(field.GetBinaryValue(), field.GetBinaryOffset(), field.GetBinaryLength());
+                                       }
+                                       else
+                                       {
+                                               byte[] x = System.Text.Encoding.GetEncoding("UTF-8").GetBytes(field.StringValue());
+                                               data = CompressionTools.Compress(x, 0, x.Length);
+                                       }
+                                       len = data.Length;
+                                       offset = 0;
+                               }
+                               
+                               fieldsStream.WriteVInt(len);
+                               fieldsStream.WriteBytes(data, offset, len);
+                       }
+                       else
+                       {
+                               // compression is disabled for the current field
+                               if (field.IsBinary())
+                               {
+                                       byte[] data;
+                                       int len;
+                                       int offset;
+                                       data = field.GetBinaryValue();
+                                       len = field.GetBinaryLength();
+                                       offset = field.GetBinaryOffset();
+                                       
+                                       fieldsStream.WriteVInt(len);
+                                       fieldsStream.WriteBytes(data, offset, len);
+                               }
+                               else
+                               {
+                                       fieldsStream.WriteString(field.StringValue());
+                               }
+                       }
+               }
+               
+               /// <summary>Bulk write a contiguous series of documents.  The
+               /// lengths array is the length (in bytes) of each raw
+               /// document.  The stream IndexInput is the
+               /// fieldsStream from which we should bulk-copy all
+               /// bytes. 
+               /// </summary>
+               internal void  AddRawDocuments(IndexInput stream, int[] lengths, int numDocs)
+               {
+                       long position = fieldsStream.GetFilePointer();
+                       long start = position;
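+                       // Write one index entry per raw document up front, then bulk-copy all raw bytes
+                       // with a single CopyBytes call.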
+                       for (int i = 0; i < numDocs; i++)
+                       {
+                               indexStream.WriteLong(position);
+                               position += lengths[i];
+                       }
+                       fieldsStream.CopyBytes(stream, position - start);
+                       System.Diagnostics.Debug.Assert(fieldsStream.GetFilePointer() == position);
+               }
+               
+               internal void  AddDocument(Document doc)
+               {
+                       indexStream.WriteLong(fieldsStream.GetFilePointer());
+                       
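+                       // Two passes over the fields: the stored-field count precedes the field data on
+                       // disk, so count the stored fields first, then write each one.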
+                       int storedCount = 0;
+                       System.Collections.IEnumerator fieldIterator = doc.GetFields().GetEnumerator();
+                       while (fieldIterator.MoveNext())
+                       {
+                               Fieldable field = (Fieldable) fieldIterator.Current;
+                               if (field.IsStored())
+                                       storedCount++;
+                       }
+                       fieldsStream.WriteVInt(storedCount);
+                       
+                       fieldIterator = doc.GetFields().GetEnumerator();
+                       while (fieldIterator.MoveNext())
+                       {
+                               Fieldable field = (Fieldable) fieldIterator.Current;
+                               if (field.IsStored())
+                                       WriteField(fieldInfos.FieldInfo(field.Name()), field);
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FilterIndexReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FilterIndexReader.cs
new file mode 100644 (file)
index 0000000..c6c0d45
--- /dev/null
@@ -0,0 +1,363 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Document = Mono.Lucene.Net.Documents.Document;
+using FieldSelector = Mono.Lucene.Net.Documents.FieldSelector;
+using Directory = Mono.Lucene.Net.Store.Directory;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>A <code>FilterIndexReader</code> contains another IndexReader, which it
+       /// uses as its basic source of data, possibly transforming the data along the
+       /// way or providing additional functionality. The class
+       /// <code>FilterIndexReader</code> itself simply implements all abstract methods
+       /// of <code>IndexReader</code> with versions that pass all requests to the
+       /// contained index reader. Subclasses of <code>FilterIndexReader</code> may
+       /// further override some of these methods and may also provide additional
+       /// methods and fields.
+       /// </summary>
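+       /// <example>
+       /// A minimal, hypothetical subclass that delegates everything to the wrapped reader and
+       /// overrides only what it needs to change:
+       /// <code>
+       /// class MyFilterReader : FilterIndexReader
+       /// {
+       ///     public MyFilterReader(IndexReader r) : base(r) { }
+       ///     public override int NumDocs()
+       ///     {
+       ///         return base.NumDocs(); // customize the delegated result here
+       ///     }
+       /// }
+       /// </code>
+       /// </example>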
+       public class FilterIndexReader:IndexReader
+       {
+               
+               /// <summary>Base class for filtering {@link TermDocs} implementations. </summary>
+               public class FilterTermDocs : TermDocs
+               {
+                       protected internal TermDocs in_Renamed;
+                       
+                       public FilterTermDocs(TermDocs in_Renamed)
+                       {
+                               this.in_Renamed = in_Renamed;
+                       }
+                       
+                       public virtual void  Seek(Term term)
+                       {
+                               in_Renamed.Seek(term);
+                       }
+                       public virtual void  Seek(TermEnum termEnum)
+                       {
+                               in_Renamed.Seek(termEnum);
+                       }
+                       public virtual int Doc()
+                       {
+                               return in_Renamed.Doc();
+                       }
+                       public virtual int Freq()
+                       {
+                               return in_Renamed.Freq();
+                       }
+                       public virtual bool Next()
+                       {
+                               return in_Renamed.Next();
+                       }
+                       public virtual int Read(int[] docs, int[] freqs)
+                       {
+                               return in_Renamed.Read(docs, freqs);
+                       }
+                       public virtual bool SkipTo(int i)
+                       {
+                               return in_Renamed.SkipTo(i);
+                       }
+                       public virtual void  Close()
+                       {
+                               in_Renamed.Close();
+                       }
+               }
+               
+               /// <summary>Base class for filtering {@link TermPositions} implementations. </summary>
+               public class FilterTermPositions:FilterTermDocs, TermPositions
+               {
+                       
+                       public FilterTermPositions(TermPositions in_Renamed):base(in_Renamed)
+                       {
+                       }
+                       
+                       public virtual int NextPosition()
+                       {
+                               return ((TermPositions) this.in_Renamed).NextPosition();
+                       }
+                       
+                       public virtual int GetPayloadLength()
+                       {
+                               return ((TermPositions) this.in_Renamed).GetPayloadLength();
+                       }
+                       
+                       public virtual byte[] GetPayload(byte[] data, int offset)
+                       {
+                               return ((TermPositions) this.in_Renamed).GetPayload(data, offset);
+                       }
+                       
+                       
+                       // TODO: Remove warning after API has been finalized
+                       public virtual bool IsPayloadAvailable()
+                       {
+                               return ((TermPositions) this.in_Renamed).IsPayloadAvailable();
+                       }
+               }
+               
+               /// <summary>Base class for filtering {@link TermEnum} implementations. </summary>
+               public class FilterTermEnum:TermEnum
+               {
+                       protected internal TermEnum in_Renamed;
+                       
+                       public FilterTermEnum(TermEnum in_Renamed)
+                       {
+                               this.in_Renamed = in_Renamed;
+                       }
+                       
+                       public override bool Next()
+                       {
+                               return in_Renamed.Next();
+                       }
+                       public override Term Term()
+                       {
+                               return in_Renamed.Term();
+                       }
+                       public override int DocFreq()
+                       {
+                               return in_Renamed.DocFreq();
+                       }
+                       public override void  Close()
+                       {
+                               in_Renamed.Close();
+                       }
+               }
+               
+               protected internal IndexReader in_Renamed;
+               
+               /// <summary> <p/>Construct a FilterIndexReader based on the specified base reader.
+               /// Directory locking for delete, undeleteAll, and setNorm operations is
+               /// left to the base reader.<p/>
+               /// <p/>Note that the base reader is closed if this FilterIndexReader is closed.<p/>
+               /// </summary>
+               /// <param name="in_Renamed">the specified base reader.
+               /// </param>
+               public FilterIndexReader(IndexReader in_Renamed):base()
+               {
+                       this.in_Renamed = in_Renamed;
+               }
+               
+               public override Directory Directory()
+               {
+                       return in_Renamed.Directory();
+               }
+               
+               public override TermFreqVector[] GetTermFreqVectors(int docNumber)
+               {
+                       EnsureOpen();
+                       return in_Renamed.GetTermFreqVectors(docNumber);
+               }
+               
+               public override TermFreqVector GetTermFreqVector(int docNumber, System.String field)
+               {
+                       EnsureOpen();
+                       return in_Renamed.GetTermFreqVector(docNumber, field);
+               }
+               
+               
+               public override void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
+               {
+                       EnsureOpen();
+                       in_Renamed.GetTermFreqVector(docNumber, field, mapper);
+               }
+               
+               public override void  GetTermFreqVector(int docNumber, TermVectorMapper mapper)
+               {
+                       EnsureOpen();
+                       in_Renamed.GetTermFreqVector(docNumber, mapper);
+               }
+               
+               public override int NumDocs()
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       return in_Renamed.NumDocs();
+               }
+               
+               public override int MaxDoc()
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       return in_Renamed.MaxDoc();
+               }
+               
+               public override Document Document(int n, FieldSelector fieldSelector)
+               {
+                       EnsureOpen();
+                       return in_Renamed.Document(n, fieldSelector);
+               }
+               
+               public override bool IsDeleted(int n)
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       return in_Renamed.IsDeleted(n);
+               }
+               
+               public override bool HasDeletions()
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       return in_Renamed.HasDeletions();
+               }
+               
+               protected internal override void  DoUndeleteAll()
+               {
+                       in_Renamed.UndeleteAll();
+               }
+               
+               public override bool HasNorms(System.String field)
+               {
+                       EnsureOpen();
+                       return in_Renamed.HasNorms(field);
+               }
+               
+               public override byte[] Norms(System.String f)
+               {
+                       EnsureOpen();
+                       return in_Renamed.Norms(f);
+               }
+               
+               public override void  Norms(System.String f, byte[] bytes, int offset)
+               {
+                       EnsureOpen();
+                       in_Renamed.Norms(f, bytes, offset);
+               }
+               
+               protected internal override void  DoSetNorm(int d, System.String f, byte b)
+               {
+                       in_Renamed.SetNorm(d, f, b);
+               }
+               
+               public override TermEnum Terms()
+               {
+                       EnsureOpen();
+                       return in_Renamed.Terms();
+               }
+               
+               public override TermEnum Terms(Term t)
+               {
+                       EnsureOpen();
+                       return in_Renamed.Terms(t);
+               }
+               
+               public override int DocFreq(Term t)
+               {
+                       EnsureOpen();
+                       return in_Renamed.DocFreq(t);
+               }
+               
+               public override TermDocs TermDocs()
+               {
+                       EnsureOpen();
+                       return in_Renamed.TermDocs();
+               }
+               
+               public override TermDocs TermDocs(Term term)
+               {
+                       EnsureOpen();
+                       return in_Renamed.TermDocs(term);
+               }
+               
+               public override TermPositions TermPositions()
+               {
+                       EnsureOpen();
+                       return in_Renamed.TermPositions();
+               }
+               
+               protected internal override void  DoDelete(int n)
+               {
+                       in_Renamed.DeleteDocument(n);
+               }
+               
+               /// <deprecated> 
+               /// </deprecated>
+        [Obsolete]
+               protected internal override void  DoCommit()
+               {
+                       DoCommit(null);
+               }
+
+        protected internal override void DoCommit(System.Collections.Generic.IDictionary<string, string> commitUserData)
+               {
+                       in_Renamed.Commit(commitUserData);
+               }
+               
+               protected internal override void  DoClose()
+               {
+                       in_Renamed.Close();
+            // NOTE: only needed in case someone had asked for
+            // FieldCache for top-level reader (which is generally
+            // not a good idea):
+            Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.Purge(this);
+               }
+
+
+        public override System.Collections.Generic.ICollection<string> GetFieldNames(IndexReader.FieldOption fieldNames)
+               {
+                       EnsureOpen();
+                       return in_Renamed.GetFieldNames(fieldNames);
+               }
+               
+               public override long GetVersion()
+               {
+                       EnsureOpen();
+                       return in_Renamed.GetVersion();
+               }
+               
+               public override bool IsCurrent()
+               {
+                       EnsureOpen();
+                       return in_Renamed.IsCurrent();
+               }
+               
+               public override bool IsOptimized()
+               {
+                       EnsureOpen();
+                       return in_Renamed.IsOptimized();
+               }
+               
+               public override IndexReader[] GetSequentialSubReaders()
+               {
+                       return in_Renamed.GetSequentialSubReaders();
+               }
+               
+               override public System.Object Clone()
+               {
+            System.Diagnostics.Debug.Fail("Port issue:", "Let's see if we need this FilterIndexReader.Clone()"); // {{Aroush-2.9}}
+                       return null;
+               }
+
+        /// <summary>
+        /// If a subclass of FilterIndexReader modifies the
+        /// contents of the FieldCache, you must override this
+        /// method to provide a different key.
+        /// </summary>
+        public override object GetFieldCacheKey() 
+        {
+            return in_Renamed.GetFieldCacheKey();
+        }
+
+        /// <summary>
+        /// If a subclass of FilterIndexReader modifies the
+        /// deleted docs, you must override this method to provide
+        /// a different key.
+        /// </summary>
+        public override object GetDeletesCacheKey() 
+        {
+            return in_Renamed.GetDeletesCacheKey();
+        }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsDocsConsumer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsDocsConsumer.cs
new file mode 100644 (file)
index 0000000..d1faa91
--- /dev/null
@@ -0,0 +1,36 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> NOTE: this API is experimental and will likely change</summary>
+       
+       abstract class FormatPostingsDocsConsumer
+       {
+               
+               /// <summary>Adds a new doc for this term.  If this returns null
+               /// then we just skip consuming positions/payloads. 
+               /// </summary>
+               internal abstract FormatPostingsPositionsConsumer AddDoc(int docID, int termDocFreq);
+               
+               /// <summary>Called when we are done adding docs to this term </summary>
+               internal abstract void  Finish();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsDocsWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsDocsWriter.cs
new file mode 100644 (file)
index 0000000..ab09c53
--- /dev/null
@@ -0,0 +1,136 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/// <summary>Consumes doc &amp; freq, writing them using the current
+/// index file format.
+/// </summary>
+
+using System;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+using UnicodeUtil = Mono.Lucene.Net.Util.UnicodeUtil;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class FormatPostingsDocsWriter:FormatPostingsDocsConsumer
+       {
+               
+               internal IndexOutput out_Renamed;
+               internal FormatPostingsTermsWriter parent;
+               internal FormatPostingsPositionsWriter posWriter;
+               internal DefaultSkipListWriter skipListWriter;
+               internal int skipInterval;
+               internal int totalNumDocs;
+               
+               internal bool omitTermFreqAndPositions;
+               internal bool storePayloads;
+               internal long freqStart;
+               internal FieldInfo fieldInfo;
+               
+               internal FormatPostingsDocsWriter(SegmentWriteState state, FormatPostingsTermsWriter parent):base()
+               {
+                       this.parent = parent;
+                       System.String fileName = IndexFileNames.SegmentFileName(parent.parent.segment, IndexFileNames.FREQ_EXTENSION);
+                       SupportClass.CollectionsHelper.AddIfNotContains(state.flushedFiles, fileName);
+                       out_Renamed = parent.parent.dir.CreateOutput(fileName);
+                       totalNumDocs = parent.parent.totalNumDocs;
+                       
+                       // TODO: abstraction violation
+                       skipInterval = parent.parent.termsOut.skipInterval;
+                       skipListWriter = parent.parent.skipListWriter;
+                       skipListWriter.SetFreqOutput(out_Renamed);
+                       
+                       posWriter = new FormatPostingsPositionsWriter(state, this);
+               }
+               
+               internal void  SetField(FieldInfo fieldInfo)
+               {
+                       this.fieldInfo = fieldInfo;
+                       omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
+                       storePayloads = fieldInfo.storePayloads;
+                       posWriter.SetField(fieldInfo);
+               }
+               
+               internal int lastDocID;
+               internal int df;
+               
+               /// <summary>Adds a new doc for this term.  If this returns null
+               /// then we just skip consuming positions/payloads. 
+               /// </summary>
+               internal override FormatPostingsPositionsConsumer AddDoc(int docID, int termDocFreq)
+               {
+                       
+                       int delta = docID - lastDocID;
+                       
+                       if (docID < 0 || (df > 0 && delta <= 0))
+                               throw new CorruptIndexException("docs out of order (" + docID + " <= " + lastDocID + ")");
+                       
+                       if ((++df % skipInterval) == 0)
+                       {
+                               // TODO: abstraction violation
+                               skipListWriter.SetSkipData(lastDocID, storePayloads, posWriter.lastPayloadLength);
+                               skipListWriter.BufferSkip(df);
+                       }
+                       
+                       System.Diagnostics.Debug.Assert(docID < totalNumDocs, "docID=" + docID + " totalNumDocs=" + totalNumDocs);
+                       
+                       lastDocID = docID;
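+                       // Doc deltas are left-shifted one bit; the low bit flags the common freq == 1 case
+                       // so that the term frequency can be omitted from the stream entirely in that case.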
+                       if (omitTermFreqAndPositions)
+                               out_Renamed.WriteVInt(delta);
+                       else if (1 == termDocFreq)
+                               out_Renamed.WriteVInt((delta << 1) | 1);
+                       else
+                       {
+                               out_Renamed.WriteVInt(delta << 1);
+                               out_Renamed.WriteVInt(termDocFreq);
+                       }
+                       
+                       return posWriter;
+               }
+               
+               private TermInfo termInfo = new TermInfo(); // reused to minimize allocations
+               internal UnicodeUtil.UTF8Result utf8 = new UnicodeUtil.UTF8Result();
+               
+               /// <summary>Called when we are done adding docs to this term </summary>
+               internal override void  Finish()
+               {
+                       long skipPointer = skipListWriter.WriteSkip(out_Renamed);
+                       
+                       // TODO: this is an abstraction violation -- we should not
+                       // peek up into the parent's terms encoding format
+                       termInfo.Set(df, parent.freqStart, parent.proxStart, (int) (skipPointer - parent.freqStart));
+                       
+                       // TODO: we could do this incrementally
+                       UnicodeUtil.UTF16toUTF8(parent.currentTerm, parent.currentTermStart, utf8);
+                       
+                       if (df > 0)
+                       {
+                               parent.termsOut.Add(fieldInfo.number, utf8.result, utf8.length, termInfo);
+                       }
+                       
+                       lastDocID = 0;
+                       df = 0;
+               }
+               
+               internal void  Close()
+               {
+                       out_Renamed.Close();
+                       posWriter.Close();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsFieldsConsumer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsFieldsConsumer.cs
new file mode 100644 (file)
index 0000000..772aa0f
--- /dev/null
@@ -0,0 +1,39 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>Abstract API that consumes terms, doc, freq, prox and
+       /// payload postings.  Concrete implementations of this
+       /// actually do "something" with the postings (write them into
+       /// the index in a specific format).
+       /// 
+       /// NOTE: this API is experimental and will likely change
+       /// </summary>
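+       // Typical call sequence (illustrative sketch, not from this patch):
+       //
+       //   FormatPostingsFieldsConsumer consumer = ...;  // e.g. FormatPostingsFieldsWriter
+       //   FormatPostingsTermsConsumer terms = consumer.AddField(fieldInfo);
+       //   FormatPostingsDocsConsumer docs = terms.AddTerm(termChars, 0);
+       //   FormatPostingsPositionsConsumer positions = docs.AddDoc(docID, freq);
+       //   positions.AddPosition(pos, payload, 0, payloadLength);
+       //   ... then Finish() at each level, innermost first.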
+       abstract class FormatPostingsFieldsConsumer
+       {
+               
+               /// <summary>Add a new field </summary>
+               internal abstract FormatPostingsTermsConsumer AddField(FieldInfo field);
+               
+               /// <summary>Called when we are done adding everything. </summary>
+               internal abstract void  Finish();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsFieldsWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsFieldsWriter.cs
new file mode 100644 (file)
index 0000000..49119e6
--- /dev/null
@@ -0,0 +1,71 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Directory = Mono.Lucene.Net.Store.Directory;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class FormatPostingsFieldsWriter:FormatPostingsFieldsConsumer
+       {
+               
+               internal Directory dir;
+               internal System.String segment;
+               internal TermInfosWriter termsOut;
+               internal FieldInfos fieldInfos;
+               internal FormatPostingsTermsWriter termsWriter;
+               internal DefaultSkipListWriter skipListWriter;
+               internal int totalNumDocs;
+               
+               public FormatPostingsFieldsWriter(SegmentWriteState state, FieldInfos fieldInfos):base()
+               {
+                       
+                       dir = state.directory;
+                       segment = state.segmentName;
+                       totalNumDocs = state.numDocs;
+                       this.fieldInfos = fieldInfos;
+                       termsOut = new TermInfosWriter(dir, segment, fieldInfos, state.termIndexInterval);
+                       
+                       // TODO: this is a nasty abstraction violation (that we
+                       // peek down to find freqOut/proxOut) -- we need a
+                       // better abstraction here whereby these child consumers
+                       // can provide skip data or not
+                       skipListWriter = new DefaultSkipListWriter(termsOut.skipInterval, termsOut.maxSkipLevels, totalNumDocs, null, null);
+                       
+                       SupportClass.CollectionsHelper.AddIfNotContains(state.flushedFiles, state.SegmentFileName(IndexFileNames.TERMS_EXTENSION));
+                       SupportClass.CollectionsHelper.AddIfNotContains(state.flushedFiles, state.SegmentFileName(IndexFileNames.TERMS_INDEX_EXTENSION));
+                       
+                       termsWriter = new FormatPostingsTermsWriter(state, this);
+               }
+               
+               /// <summary>Add a new field </summary>
+               internal override FormatPostingsTermsConsumer AddField(FieldInfo field)
+               {
+                       termsWriter.SetField(field);
+                       return termsWriter;
+               }
+               
+               /// <summary>Called when we are done adding everything. </summary>
+               internal override void  Finish()
+               {
+                       termsOut.Close();
+                       termsWriter.Close();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsPositionsConsumer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsPositionsConsumer.cs
new file mode 100644 (file)
index 0000000..8a721d5
--- /dev/null
@@ -0,0 +1,36 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       abstract class FormatPostingsPositionsConsumer
+       {
+               
+               /// <summary>Add a new position &amp; payload.  If payloadLength > 0
+               /// you must read those bytes from the IndexInput. 
+               /// </summary>
+               internal abstract void  AddPosition(int position, byte[] payload, int payloadOffset, int payloadLength);
+               
+               /// <summary>Called when we are done adding positions &amp; payloads </summary>
+               internal abstract void  Finish();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsPositionsWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsPositionsWriter.cs
new file mode 100644 (file)
index 0000000..6fc91eb
--- /dev/null
@@ -0,0 +1,101 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class FormatPostingsPositionsWriter:FormatPostingsPositionsConsumer
+       {
+               
+               internal FormatPostingsDocsWriter parent;
+               internal IndexOutput out_Renamed;
+               
+               internal bool omitTermFreqAndPositions;
+               internal bool storePayloads;
+               internal int lastPayloadLength = - 1;
+               
+               internal FormatPostingsPositionsWriter(SegmentWriteState state, FormatPostingsDocsWriter parent)
+               {
+                       this.parent = parent;
+                       omitTermFreqAndPositions = parent.omitTermFreqAndPositions;
+                       if (parent.parent.parent.fieldInfos.HasProx())
+                       {
+                               // At least one field does not omit TF, so create the
+                               // prox file
+                               System.String fileName = IndexFileNames.SegmentFileName(parent.parent.parent.segment, IndexFileNames.PROX_EXTENSION);
+                               SupportClass.CollectionsHelper.AddIfNotContains(state.flushedFiles, fileName);
+                               out_Renamed = parent.parent.parent.dir.CreateOutput(fileName);
+                               parent.skipListWriter.SetProxOutput(out_Renamed);
+                       }
+                       // Every field omits TF so we will write no prox file
+                       else
+                               out_Renamed = null;
+               }
+               
+               internal int lastPosition;
+               
+               /// <summary>Add a new position &amp; payload </summary>
+               internal override void  AddPosition(int position, byte[] payload, int payloadOffset, int payloadLength)
+               {
+                       System.Diagnostics.Debug.Assert(!omitTermFreqAndPositions, "omitTermFreqAndPositions is true");
+                       System.Diagnostics.Debug.Assert(out_Renamed != null);
+                       
+                       int delta = position - lastPosition;
+                       lastPosition = position;
+                       
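+                       // Illustrative encoding: positions 3 then 10, each with a
+                       // 2-byte payload, emit VInt((3<<1)|1) VInt(2) <2 bytes>
+                       // then VInt((10-3)<<1) <2 bytes> -- the payload length is
+                       // only re-written when it changes.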
+                       if (storePayloads)
+                       {
+                               if (payloadLength != lastPayloadLength)
+                               {
+                                       lastPayloadLength = payloadLength;
+                                       out_Renamed.WriteVInt((delta << 1) | 1);
+                                       out_Renamed.WriteVInt(payloadLength);
+                               }
+                               else
+                                       out_Renamed.WriteVInt(delta << 1);
+                               if (payloadLength > 0)
+                                       out_Renamed.WriteBytes(payload, payloadLength);
+                       }
+                       else
+                               out_Renamed.WriteVInt(delta);
+               }
+               
+               internal void  SetField(FieldInfo fieldInfo)
+               {
+                       omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
+                       storePayloads = omitTermFreqAndPositions ? false : fieldInfo.storePayloads;
+               }
+               
+               /// <summary>Called when we are done adding positions &amp; payloads </summary>
+               internal override void  Finish()
+               {
+                       lastPosition = 0;
+                       lastPayloadLength = - 1;
+               }
+               
+               internal void  Close()
+               {
+                       if (out_Renamed != null)
+                               out_Renamed.Close();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsTermsConsumer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsTermsConsumer.cs
new file mode 100644 (file)
index 0000000..f3eef5e
--- /dev/null
@@ -0,0 +1,52 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using ArrayUtil = Mono.Lucene.Net.Util.ArrayUtil;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> NOTE: this API is experimental and will likely change</summary>
+       
+       abstract class FormatPostingsTermsConsumer
+       {
+               
+               /// <summary>Adds a new term in this field; term ends with U+FFFF
+               /// char 
+               /// </summary>
+               internal abstract FormatPostingsDocsConsumer AddTerm(char[] text, int start);
+               
+               internal char[] termBuffer;
+               internal virtual FormatPostingsDocsConsumer AddTerm(System.String text)
+               {
+                       int len = text.Length;
+                       if (termBuffer == null || termBuffer.Length < 1 + len)
+                               termBuffer = new char[ArrayUtil.GetNextSize(1 + len)];
+                       for (int i = 0; i < len; i++)
+                       {
+                               termBuffer[i] = text[i];
+                       }
+                       termBuffer[len] = (char) (0xffff);
+                       return AddTerm(termBuffer, 0);
+               }
+               
+               /// <summary>Called when we are done adding terms to this field </summary>
+               internal abstract void  Finish();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsTermsWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FormatPostingsTermsWriter.cs
new file mode 100644 (file)
index 0000000..d2a1ff7
--- /dev/null
@@ -0,0 +1,78 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class FormatPostingsTermsWriter:FormatPostingsTermsConsumer
+       {
+               
+               internal FormatPostingsFieldsWriter parent;
+               internal FormatPostingsDocsWriter docsWriter;
+               internal TermInfosWriter termsOut;
+               internal FieldInfo fieldInfo;
+               
+               internal FormatPostingsTermsWriter(SegmentWriteState state, FormatPostingsFieldsWriter parent):base()
+               {
+                       this.parent = parent;
+                       termsOut = parent.termsOut;
+                       docsWriter = new FormatPostingsDocsWriter(state, this);
+               }
+               
+               internal void  SetField(FieldInfo fieldInfo)
+               {
+                       this.fieldInfo = fieldInfo;
+                       docsWriter.SetField(fieldInfo);
+               }
+               
+               internal char[] currentTerm;
+               internal int currentTermStart;
+               
+               internal long freqStart;
+               internal long proxStart;
+               
+               /// <summary>Adds a new term in this field </summary>
+               internal override FormatPostingsDocsConsumer AddTerm(char[] text, int start)
+               {
+                       currentTerm = text;
+                       currentTermStart = start;
+                       
+                       // TODO: this is an abstraction violation -- ideally this
+                       // terms writer is not so "invasive", looking for file
+                       // pointers in its child consumers.
+                       freqStart = docsWriter.out_Renamed.GetFilePointer();
+                       if (docsWriter.posWriter.out_Renamed != null)
+                               proxStart = docsWriter.posWriter.out_Renamed.GetFilePointer();
+                       
+                       parent.skipListWriter.ResetSkip();
+                       
+                       return docsWriter;
+               }
+               
+               /// <summary>Called when we are done adding terms to this field </summary>
+               internal override void  Finish()
+               {
+               }
+               
+               internal void  Close()
+               {
+                       docsWriter.Close();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FreqProxFieldMergeState.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FreqProxFieldMergeState.cs
new file mode 100644 (file)
index 0000000..483bdbf
--- /dev/null
@@ -0,0 +1,116 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       // TODO FI: some of this is "generic" to TermsHash* so we
+       // should factor it out so other consumers don't have to
+       // duplicate this code
+       
+       /// <summary>Used by DocumentsWriter to merge the postings from
+       /// multiple ThreadStates when creating a segment 
+       /// </summary>
+       sealed class FreqProxFieldMergeState
+       {
+               
+               internal FreqProxTermsWriterPerField field;
+               internal int numPostings;
+               internal CharBlockPool charPool;
+               internal RawPostingList[] postings;
+               
+               private FreqProxTermsWriter.PostingList p;
+               internal char[] text;
+               internal int textOffset;
+               
+               private int postingUpto = - 1;
+               
+               internal ByteSliceReader freq = new ByteSliceReader();
+               internal ByteSliceReader prox = new ByteSliceReader();
+               
+               internal int docID;
+               internal int termFreq;
+               
+               public FreqProxFieldMergeState(FreqProxTermsWriterPerField field)
+               {
+                       this.field = field;
+                       this.charPool = field.perThread.termsHashPerThread.charPool;
+                       this.numPostings = field.termsHashPerField.numPostings;
+                       this.postings = field.termsHashPerField.SortPostings();
+               }
+               
+               internal bool NextTerm()
+               {
+                       postingUpto++;
+                       if (postingUpto == numPostings)
+                               return false;
+                       
+                       p = (FreqProxTermsWriter.PostingList) postings[postingUpto];
+                       docID = 0;
+                       
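+                       // textStart is a global offset into the shared char pool:
+                       // the high bits select the buffer, the low bits the
+                       // offset inside it.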
+                       text = charPool.buffers[p.textStart >> DocumentsWriter.CHAR_BLOCK_SHIFT];
+                       textOffset = p.textStart & DocumentsWriter.CHAR_BLOCK_MASK;
+                       
+                       field.termsHashPerField.InitReader(freq, p, 0);
+                       if (!field.fieldInfo.omitTermFreqAndPositions)
+                               field.termsHashPerField.InitReader(prox, p, 1);
+                       
+                       // Should always be true
+                       bool result = NextDoc();
+                       System.Diagnostics.Debug.Assert(result);
+                       
+                       return true;
+               }
+               
+               public bool NextDoc()
+               {
+                       if (freq.Eof())
+                       {
+                               if (p.lastDocCode != - 1)
+                               {
+                                       // Return last doc
+                                       docID = p.lastDocID;
+                                       if (!field.omitTermFreqAndPositions)
+                                               termFreq = p.docFreq;
+                                       p.lastDocCode = - 1;
+                                       return true;
+                               }
+                               // EOF
+                               else
+                                       return false;
+                       }
+                       
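+                       // Mirrors the on-disk encoding: the low bit of the VInt
+                       // flags termFreq == 1, the remaining bits are the docID
+                       // delta (e.g. code 11 -> delta 5, freq 1; code 6 ->
+                       // delta 3 with an explicit freq VInt following).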
+                       int code = freq.ReadVInt();
+                       if (field.omitTermFreqAndPositions)
+                               docID += code;
+                       else
+                       {
+                               docID += SupportClass.Number.URShift(code, 1);
+                               if ((code & 1) != 0)
+                                       termFreq = 1;
+                               else
+                                       termFreq = freq.ReadVInt();
+                       }
+                       
+                       System.Diagnostics.Debug.Assert(docID != p.lastDocID);
+                       
+                       return true;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FreqProxTermsWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FreqProxTermsWriter.cs
new file mode 100644 (file)
index 0000000..ca55c75
--- /dev/null
@@ -0,0 +1,322 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+using UnicodeUtil = Mono.Lucene.Net.Util.UnicodeUtil;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class FreqProxTermsWriter:TermsHashConsumer
+       {
+               
+               public override TermsHashConsumerPerThread AddThread(TermsHashPerThread perThread)
+               {
+                       return new FreqProxTermsWriterPerThread(perThread);
+               }
+               
+               internal override void  CreatePostings(RawPostingList[] postings, int start, int count)
+               {
+                       int end = start + count;
+                       for (int i = start; i < end; i++)
+                               postings[i] = new PostingList();
+               }
+               
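+               // Terms in the in-RAM hash end with the U+FFFF sentinel (see
+               // FormatPostingsTermsConsumer.AddTerm), which this comparison
+               // treats as an end-of-term marker sorting before any real char,
+               // so a term always sorts before its extensions, e.g. "ab"
+               // before "abc".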
+               private static int compareText(char[] text1, int pos1, char[] text2, int pos2)
+               {
+                       while (true)
+                       {
+                               char c1 = text1[pos1++];
+                               char c2 = text2[pos2++];
+                               if (c1 != c2)
+                               {
+                                       if (0xffff == c2)
+                                               return 1;
+                                       else if (0xffff == c1)
+                                               return - 1;
+                                       else
+                                               return c1 - c2;
+                               }
+                               else if (0xffff == c1)
+                                       return 0;
+                       }
+               }
+               
+               internal override void  CloseDocStore(SegmentWriteState state)
+               {
+               }
+               public override void  Abort()
+               {
+               }
+               
+               
+               // TODO: would be nice to factor out more of this, eg the
+               // FreqProxFieldMergeState, and code to visit all Fields
+               // under the same FieldInfo together, up into TermsHash*.
+               // Other writers would presumably share a lot of this...
+               
+               public override void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
+               {
+                       
+                       // Gather all FieldData's that have postings, across all
+                       // ThreadStates
+                       System.Collections.ArrayList allFields = new System.Collections.ArrayList();
+
+                       System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               
+                               System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
+                               
+                               System.Collections.ICollection fields = (System.Collections.ICollection) entry.Value;
+                               
+                               System.Collections.IEnumerator fieldsIt = fields.GetEnumerator();
+                               
+                               while (fieldsIt.MoveNext())
+                               {
+                                       FreqProxTermsWriterPerField perField = (FreqProxTermsWriterPerField) ((System.Collections.DictionaryEntry) fieldsIt.Current).Key;
+                                       if (perField.termsHashPerField.numPostings > 0)
+                                               allFields.Add(perField);
+                               }
+                       }
+                       
+                       // Sort by field name
+                       allFields.Sort();
+                       int numAllFields = allFields.Count;
+                       
+                       // TODO: allow Lucene user to customize this consumer:
+                       FormatPostingsFieldsConsumer consumer = new FormatPostingsFieldsWriter(state, fieldInfos);
+                       /*
+                       Current writer chain:
+                       FormatPostingsFieldsConsumer
+                       -> IMPL: FormatPostingsFieldsWriter
+                       -> FormatPostingsTermsConsumer
+                       -> IMPL: FormatPostingsTermsWriter
+                       -> FormatPostingsDocConsumer
+                       -> IMPL: FormatPostingsDocWriter
+                       -> FormatPostingsPositionsConsumer
+                       -> IMPL: FormatPostingsPositionsWriter
+                       */
+                       
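+                       // Fields with the same name coming from different threads
+                       // are batched together so their postings merge into one
+                       // dictionary entry per term.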
+                       int start = 0;
+                       while (start < numAllFields)
+                       {
+                               FieldInfo fieldInfo = ((FreqProxTermsWriterPerField) allFields[start]).fieldInfo;
+                               System.String fieldName = fieldInfo.name;
+                               
+                               int end = start + 1;
+                               while (end < numAllFields && ((FreqProxTermsWriterPerField) allFields[end]).fieldInfo.name.Equals(fieldName))
+                                       end++;
+                               
+                               FreqProxTermsWriterPerField[] fields = new FreqProxTermsWriterPerField[end - start];
+                               for (int i = start; i < end; i++)
+                               {
+                                       fields[i - start] = (FreqProxTermsWriterPerField) allFields[i];
+                                       
+                                       // Aggregate the storePayloads flag as seen by the same
+                                       // field across multiple threads
+                                       fieldInfo.storePayloads |= fields[i - start].hasPayloads;
+                               }
+                               
+                               // If this field has postings then add them to the
+                               // segment
+                               AppendPostings(fields, consumer);
+                               
+                               for (int i = 0; i < fields.Length; i++)
+                               {
+                                       TermsHashPerField perField = fields[i].termsHashPerField;
+                                       int numPostings = perField.numPostings;
+                                       perField.Reset();
+                                       perField.ShrinkHash(numPostings);
+                                       fields[i].Reset();
+                               }
+                               
+                               start = end;
+                       }
+
+                       it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
+                               FreqProxTermsWriterPerThread perThread = (FreqProxTermsWriterPerThread) entry.Key;
+                               perThread.termsHashPerThread.Reset(true);
+                       }
+                       
+                       consumer.Finish();
+               }
+               
+               private byte[] payloadBuffer;
+               
+               /* Walk through all unique text tokens (Posting
+               * instances) found in this field and serialize them
+               * into a single RAM segment. */
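+               // The merge is a k-way interleave (sketch): each
+               // FreqProxFieldMergeState is a cursor over one thread's sorted
+               // postings; at each step the cursors holding the smallest
+               // current term are drained in docID order, then advanced, so
+               // the segment sees globally ordered terms and docs.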
+               internal void  AppendPostings(FreqProxTermsWriterPerField[] fields, FormatPostingsFieldsConsumer consumer)
+               {
+                       
+                       int numFields = fields.Length;
+                       
+                       FreqProxFieldMergeState[] mergeStates = new FreqProxFieldMergeState[numFields];
+                       
+                       for (int i = 0; i < numFields; i++)
+                       {
+                               FreqProxFieldMergeState fms = mergeStates[i] = new FreqProxFieldMergeState(fields[i]);
+                               
+                               System.Diagnostics.Debug.Assert(fms.field.fieldInfo == fields [0].fieldInfo);
+                               
+                               // Should always be true
+                               bool result = fms.NextTerm();
+                               System.Diagnostics.Debug.Assert(result);
+                       }
+                       
+                       FormatPostingsTermsConsumer termsConsumer = consumer.AddField(fields[0].fieldInfo);
+                       
+                       FreqProxFieldMergeState[] termStates = new FreqProxFieldMergeState[numFields];
+                       
+                       bool currentFieldOmitTermFreqAndPositions = fields[0].fieldInfo.omitTermFreqAndPositions;
+                       
+                       while (numFields > 0)
+                       {
+                               
+                               // Get the next term to merge
+                               termStates[0] = mergeStates[0];
+                               int numToMerge = 1;
+                               
+                               for (int i = 1; i < numFields; i++)
+                               {
+                                       char[] text = mergeStates[i].text;
+                                       int textOffset = mergeStates[i].textOffset;
+                                       int cmp = compareText(text, textOffset, termStates[0].text, termStates[0].textOffset);
+                                       
+                                       if (cmp < 0)
+                                       {
+                                               termStates[0] = mergeStates[i];
+                                               numToMerge = 1;
+                                       }
+                                       else if (cmp == 0)
+                                               termStates[numToMerge++] = mergeStates[i];
+                               }
+                               
+                               FormatPostingsDocsConsumer docConsumer = termsConsumer.AddTerm(termStates[0].text, termStates[0].textOffset);
+                               
+                               // termStates now holds numToMerge FieldMergeStates
+                               // that all share the same term, so we interleave
+                               // their docID streams.
+                               while (numToMerge > 0)
+                               {
+                                       
+                                       FreqProxFieldMergeState minState = termStates[0];
+                                       for (int i = 1; i < numToMerge; i++)
+                                               if (termStates[i].docID < minState.docID)
+                                                       minState = termStates[i];
+                                       
+                                       int termDocFreq = minState.termFreq;
+                                       
+                                       FormatPostingsPositionsConsumer posConsumer = docConsumer.AddDoc(minState.docID, termDocFreq);
+                                       
+                                       ByteSliceReader prox = minState.prox;
+                                       
+                                       // Carefully copy over the prox + payload info,
+                                       // changing the format to match Lucene's segment
+                                       // format.
+                                       if (!currentFieldOmitTermFreqAndPositions)
+                                       {
+                                               // omitTermFreqAndPositions == false, so we do write
+                                               // positions & payloads
+                                               int position = 0;
+                                               for (int j = 0; j < termDocFreq; j++)
+                                               {
+                                                       int code = prox.ReadVInt();
+                                                       position += (code >> 1);
+                                                       
+                                                       int payloadLength;
+                                                       if ((code & 1) != 0)
+                                                       {
+                                                               // This position has a payload
+                                                               payloadLength = prox.ReadVInt();
+                                                               
+                                                               if (payloadBuffer == null || payloadBuffer.Length < payloadLength)
+                                                                       payloadBuffer = new byte[payloadLength];
+                                                               
+                                                               prox.ReadBytes(payloadBuffer, 0, payloadLength);
+                                                       }
+                                                       else
+                                                               payloadLength = 0;
+                                                       
+                                                       posConsumer.AddPosition(position, payloadBuffer, 0, payloadLength);
+                                               } //End for
+                                               
+                                               posConsumer.Finish();
+                                       }
+                                       
+                                       if (!minState.NextDoc())
+                                       {
+                                               
+                                               // Remove from termStates
+                                               int upto = 0;
+                                               for (int i = 0; i < numToMerge; i++)
+                                                       if (termStates[i] != minState)
+                                                               termStates[upto++] = termStates[i];
+                                               numToMerge--;
+                                               System.Diagnostics.Debug.Assert(upto == numToMerge);
+                                               
+                                               // Advance this state to the next term
+                                               
+                                               if (!minState.NextTerm())
+                                               {
+                                                       // OK, no more terms, so remove from mergeStates
+                                                       // as well
+                                                       upto = 0;
+                                                       for (int i = 0; i < numFields; i++)
+                                                               if (mergeStates[i] != minState)
+                                                                       mergeStates[upto++] = mergeStates[i];
+                                                       numFields--;
+                                                       System.Diagnostics.Debug.Assert(upto == numFields);
+                                               }
+                                       }
+                               }
+                               
+                               docConsumer.Finish();
+                       }
+                       
+                       termsConsumer.Finish();
+               }
+               
+               private TermInfo termInfo = new TermInfo(); // minimize consing
+               
+               internal UnicodeUtil.UTF8Result termsUTF8 = new UnicodeUtil.UTF8Result();
+               
+               internal void  Files(System.Collections.ICollection files)
+               {
+               }
+               
+               internal sealed class PostingList:RawPostingList
+               {
+                       internal int docFreq; // # times this term occurs in the current doc
+                       internal int lastDocID; // Last docID where this term occurred
+                       internal int lastDocCode; // Code for prior doc
+                       internal int lastPosition; // Last position where this term occurred
+               }
+               
+               internal override int BytesPerPosting()
+               {
+                       return RawPostingList.BYTES_SIZE + 4 * DocumentsWriter.INT_NUM_BYTE;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FreqProxTermsWriterPerField.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FreqProxTermsWriterPerField.cs
new file mode 100644 (file)
index 0000000..a508e67
--- /dev/null
@@ -0,0 +1,198 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using PayloadAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.PayloadAttribute;
+using Fieldable = Mono.Lucene.Net.Documents.Fieldable;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       // TODO: break into separate freq and prox writers as
+       // codecs; make separate container (tii/tis/skip/*) that can
+       // be configured as any number of files 1..N
+       sealed class FreqProxTermsWriterPerField:TermsHashConsumerPerField, System.IComparable
+       {
+               
+               internal FreqProxTermsWriterPerThread perThread;
+               internal TermsHashPerField termsHashPerField;
+               internal FieldInfo fieldInfo;
+               internal DocumentsWriter.DocState docState;
+               internal FieldInvertState fieldState;
+               internal bool omitTermFreqAndPositions;
+               internal PayloadAttribute payloadAttribute;
+               
+               public FreqProxTermsWriterPerField(TermsHashPerField termsHashPerField, FreqProxTermsWriterPerThread perThread, FieldInfo fieldInfo)
+               {
+                       this.termsHashPerField = termsHashPerField;
+                       this.perThread = perThread;
+                       this.fieldInfo = fieldInfo;
+                       docState = termsHashPerField.docState;
+                       fieldState = termsHashPerField.fieldState;
+                       omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
+               }
+               
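+               // Stream 0 carries the doc/freq deltas; stream 1 (present only
+               // when positions are kept) carries the prox deltas + payloads.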
+               internal override int GetStreamCount()
+               {
+                       if (fieldInfo.omitTermFreqAndPositions)
+                               return 1;
+                       else
+                               return 2;
+               }
+               
+               internal override void  Finish()
+               {
+               }
+               
+               internal bool hasPayloads;
+               
+               internal override void  SkippingLongTerm()
+               {
+               }
+               
+               public int CompareTo(System.Object other0)
+               {
+                       FreqProxTermsWriterPerField other = (FreqProxTermsWriterPerField) other0;
+                       return String.CompareOrdinal(fieldInfo.name, other.fieldInfo.name);
+               }
+               
+               internal void  Reset()
+               {
+                       // Record, up front, whether our in-RAM format will be
+                       // with or without term freqs:
+                       omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
+                       payloadAttribute = null;
+               }
+               
+               internal override bool Start(Fieldable[] fields, int count)
+               {
+                       for (int i = 0; i < count; i++)
+                               if (fields[i].IsIndexed())
+                                       return true;
+                       return false;
+               }
+               
+               internal override void  Start(Fieldable f)
+               {
+                       if (fieldState.attributeSource.HasAttribute(typeof(PayloadAttribute)))
+                       {
+                               payloadAttribute = (PayloadAttribute) fieldState.attributeSource.GetAttribute(typeof(PayloadAttribute));
+                       }
+                       else
+                       {
+                               payloadAttribute = null;
+                       }
+               }
+               
+               internal void  WriteProx(FreqProxTermsWriter.PostingList p, int proxCode)
+               {
+                       Payload payload;
+                       if (payloadAttribute == null)
+                       {
+                               payload = null;
+                       }
+                       else
+                       {
+                               payload = payloadAttribute.GetPayload();
+                       }
+                       
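+                       // The low bit of the prox code flags an inline payload:
+                       // (proxCode << 1) | 1 means its length and bytes follow;
+                       // proxCode << 1 alone means no payload at this position.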
+                       if (payload != null && payload.length > 0)
+                       {
+                               termsHashPerField.WriteVInt(1, (proxCode << 1) | 1);
+                               termsHashPerField.WriteVInt(1, payload.length);
+                               termsHashPerField.WriteBytes(1, payload.data, payload.offset, payload.length);
+                               hasPayloads = true;
+                       }
+                       else
+                               termsHashPerField.WriteVInt(1, proxCode << 1);
+                       p.lastPosition = fieldState.position;
+               }
+               
+               internal override void  NewTerm(RawPostingList p0)
+               {
+                       // First time we're seeing this term since the last
+                       // flush
+                       System.Diagnostics.Debug.Assert(docState.TestPoint("FreqProxTermsWriterPerField.newTerm start"));
+                       FreqProxTermsWriter.PostingList p = (FreqProxTermsWriter.PostingList) p0;
+                       p.lastDocID = docState.docID;
+                       if (omitTermFreqAndPositions)
+                       {
+                               p.lastDocCode = docState.docID;
+                       }
+                       else
+                       {
+                               p.lastDocCode = docState.docID << 1;
+                               p.docFreq = 1;
+                               WriteProx(p, fieldState.position);
+                       }
+               }
+               
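+               // Note the deferred write: a doc's code is only emitted once the
+               // next docID for this term arrives (or at flush), because the
+               // term's freq in the current doc is not final until then.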
+               internal override void  AddTerm(RawPostingList p0)
+               {
+                       
+                       System.Diagnostics.Debug.Assert(docState.TestPoint("FreqProxTermsWriterPerField.addTerm start"));
+                       
+                       FreqProxTermsWriter.PostingList p = (FreqProxTermsWriter.PostingList) p0;
+                       
+                       System.Diagnostics.Debug.Assert(omitTermFreqAndPositions || p.docFreq > 0);
+                       
+                       if (omitTermFreqAndPositions)
+                       {
+                               if (docState.docID != p.lastDocID)
+                               {
+                                       System.Diagnostics.Debug.Assert(docState.docID > p.lastDocID);
+                                       termsHashPerField.WriteVInt(0, p.lastDocCode);
+                                       p.lastDocCode = docState.docID - p.lastDocID;
+                                       p.lastDocID = docState.docID;
+                               }
+                       }
+                       else
+                       {
+                               if (docState.docID != p.lastDocID)
+                               {
+                                       System.Diagnostics.Debug.Assert(docState.docID > p.lastDocID);
+                                       // Term not yet seen in the current doc but previously
+                                       // seen in other doc(s) since the last flush
+                                       
+                                       // Now that we know doc freq for previous doc,
+                                       // write it & lastDocCode
+                                       if (1 == p.docFreq)
+                                               termsHashPerField.WriteVInt(0, p.lastDocCode | 1);
+                                       else
+                                       {
+                                               termsHashPerField.WriteVInt(0, p.lastDocCode);
+                                               termsHashPerField.WriteVInt(0, p.docFreq);
+                                       }
+                                       p.docFreq = 1;
+                                       p.lastDocCode = (docState.docID - p.lastDocID) << 1;
+                                       p.lastDocID = docState.docID;
+                                       WriteProx(p, fieldState.position);
+                               }
+                               else
+                               {
+                                       p.docFreq++;
+                                       WriteProx(p, fieldState.position - p.lastPosition);
+                               }
+                       }
+               }
+               
+               public void  Abort()
+               {
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FreqProxTermsWriterPerThread.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/FreqProxTermsWriterPerThread.cs
new file mode 100644 (file)
index 0000000..c7c12d3
--- /dev/null
@@ -0,0 +1,52 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class FreqProxTermsWriterPerThread:TermsHashConsumerPerThread
+       {
+               internal TermsHashPerThread termsHashPerThread;
+               internal DocumentsWriter.DocState docState;
+               
+               public FreqProxTermsWriterPerThread(TermsHashPerThread perThread)
+               {
+                       docState = perThread.docState;
+                       termsHashPerThread = perThread;
+               }
+               
+               public override TermsHashConsumerPerField AddField(TermsHashPerField termsHashPerField, FieldInfo fieldInfo)
+               {
+                       return new FreqProxTermsWriterPerField(termsHashPerField, this, fieldInfo);
+               }
+               
+               public override void  StartDocument()
+               {
+               }
+               
+               public override DocumentsWriter.DocWriter FinishDocument()
+               {
+                       return null;
+               }
+               
+               public override void  Abort()
+               {
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexCommit.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexCommit.cs
new file mode 100644 (file)
index 0000000..7d53296
--- /dev/null
@@ -0,0 +1,119 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Directory = Mono.Lucene.Net.Store.Directory;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> <p/>Expert: represents a single commit into an index as seen by the
+       /// {@link IndexDeletionPolicy} or {@link IndexReader}.<p/>
+       /// 
+       /// <p/> Changes to the content of an index are made visible
+       /// only after the writer who made that change commits by
+       /// writing a new segments file
+       /// (<code>segments_N</code>). This point in time, when the
+       /// act of writing a new segments file to the directory
+       /// is completed, is an index commit.<p/>
+       /// 
+       /// <p/>Each index commit point has a unique segments file
+       /// associated with it. The segments file associated with a
+       /// later index commit point would have a larger N.<p/>
+       /// 
+       /// <p/><b>WARNING</b>: This API is new and experimental and
+       /// may suddenly change. <p/>
+       /// </summary>
+       
+       public abstract class IndexCommit : IndexCommitPoint
+       {
+               
+               /// <summary> Get the segments file (<code>segments_N</code>) associated 
+               /// with this commit point.
+               /// </summary>
+               public abstract System.String GetSegmentsFileName();
+               
+               /// <summary> Returns all index files referenced by this commit point.</summary>
+               public abstract System.Collections.Generic.ICollection<string> GetFileNames();
+               
+               /// <summary> Returns the {@link Directory} for the index.</summary>
+               public abstract Directory GetDirectory();
+               
+               /// <summary> Delete this commit point.  This only applies when using
+               /// the commit point in the context of IndexWriter's
+               /// IndexDeletionPolicy.
+               /// <p/>
+               /// Upon calling this, the writer is notified that this commit 
+               /// point should be deleted. 
+               /// <p/>
+               /// The decision that a commit point should be deleted is made by the {@link IndexDeletionPolicy} in effect,
+               /// and therefore this should only be called by its {@link IndexDeletionPolicy#onInit onInit()} or 
+               /// {@link IndexDeletionPolicy#onCommit onCommit()} methods.
+               /// </summary>
+        public abstract void Delete();
+
+        public abstract bool IsDeleted();
+               
+               /// <summary> Returns true if this commit is an optimized index.</summary>
+        public abstract bool IsOptimized();
+
+        /// <summary> Two IndexCommits are equal if both their Directory and versions are equal.</summary>
+               public  override bool Equals(System.Object other)
+               {
+                       if (other is IndexCommit)
+                       {
+                               IndexCommit otherCommit = (IndexCommit) other;
+                               return otherCommit.GetDirectory().Equals(GetDirectory()) && otherCommit.GetVersion() == GetVersion();
+                       }
+                       else
+                               return false;
+               }
+               
+               public override int GetHashCode()
+               {
+                       return (int)(GetDirectory().GetHashCode() + GetVersion());
+               }
+               
+               /// <summary>Returns the version for this IndexCommit.  This is the
+               /// same value that {@link IndexReader#getVersion} would
+               /// return if it were opened on this commit. 
+               /// </summary>
+        public abstract long GetVersion();
+               
+               /// <summary>Returns the generation (the _N in segments_N) for this
+               /// IndexCommit 
+               /// </summary>
+        public abstract long GetGeneration();
+               
+               /// <summary>Convenience method that returns the last modified time
+               /// of the segments_N file corresponding to this index
+               /// commit, equivalent to
+               /// getDirectory().fileModified(getSegmentsFileName()). 
+               /// </summary>
+               public virtual long GetTimestamp()
+               {
+                       return GetDirectory().FileModified(GetSegmentsFileName());
+               }
+               
+               /// <summary>Returns userData, previously passed to {@link
+               /// IndexWriter#Commit(Map)} for this commit.  Map is
+               /// String -> String. 
+               /// </summary>
+        public abstract System.Collections.Generic.IDictionary<string, string> GetUserData();
+       }
+}
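As a usage note for the API above: an IndexCommit is normally obtained from an IndexReader or handed to an IndexDeletionPolicy rather than constructed directly. Below is a minimal sketch of inspecting one commit point, using only the abstract members declared in this file (the driver class itself is hypothetical, not part of this commit):

```csharp
using System;
using Mono.Lucene.Net.Index;

static class CommitInspector
{
    // Dumps the identifying data of a commit point: its segments_N file,
    // version, generation, and every index file it references.
    public static void Describe(IndexCommit commit)
    {
        Console.WriteLine("segments file: " + commit.GetSegmentsFileName());
        Console.WriteLine("version:       " + commit.GetVersion());
        Console.WriteLine("generation:    " + commit.GetGeneration());
        Console.WriteLine("optimized:     " + commit.IsOptimized());
        foreach (string file in commit.GetFileNames())
            Console.WriteLine("  references " + file);
    }
}
```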
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexCommitPoint.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexCommitPoint.cs
new file mode 100644 (file)
index 0000000..bbafc7d
--- /dev/null
@@ -0,0 +1,48 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <deprecated> Please subclass the IndexCommit class instead
+       /// </deprecated>
+    [Obsolete("Please subclass the IndexCommit class instead")]
+       public interface IndexCommitPoint
+       {
+               
+               /// <summary> Get the segments file (<code>segments_N</code>) associated 
+               /// with this commit point.
+               /// </summary>
+               System.String GetSegmentsFileName();
+               
+               /// <summary> Returns all index files referenced by this commit point.</summary>
+        System.Collections.Generic.ICollection<string> GetFileNames();
+               
+               /// <summary> Delete this commit point.
+               /// <p/>
+               /// Upon calling this, the writer is notified that this commit 
+               /// point should be deleted. 
+               /// <p/>
+               /// The decision that a commit point should be deleted is made by the {@link IndexDeletionPolicy} in effect,
+               /// and therefore this should only be called by its {@link IndexDeletionPolicy#onInit onInit()} or 
+               /// {@link IndexDeletionPolicy#onCommit onCommit()} methods.
+               /// </summary>
+               void  Delete();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexDeletionPolicy.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexDeletionPolicy.cs
new file mode 100644 (file)
index 0000000..f43ecb8
--- /dev/null
@@ -0,0 +1,102 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> <p/>Expert: policy for deletion of stale {@link IndexCommit index commits}. 
+       /// 
+       /// <p/>Implement this interface, and pass it to one
+       /// of the {@link IndexWriter} or {@link IndexReader}
+       /// constructors, to customize when older
+       /// {@link IndexCommit point-in-time commits}
+       /// are deleted from the index directory.  The default deletion policy
+       /// is {@link KeepOnlyLastCommitDeletionPolicy}, which always
+       /// removes old commits as soon as a new commit is done (this
+       /// matches the behavior before 2.2).<p/>
+       /// 
+       /// <p/>One expected use case for this (and the reason why it
+       /// was first created) is to work around problems with an
+       /// index directory accessed via filesystems like NFS because
+       /// NFS does not provide the "delete on last close" semantics
+       /// that Lucene's "point in time" search normally relies on.
+       /// By implementing a custom deletion policy, such as "a
+       /// commit is only removed once it has been stale for more
+       /// than X minutes", you can give your readers time to
+       /// refresh to the new commit before {@link IndexWriter}
+       /// removes the old commits.  Note that doing so will
+       /// increase the storage requirements of the index.  See <a
+       /// target="top"
+       /// href="http://issues.apache.org/jira/browse/LUCENE-710">LUCENE-710</a>
+       /// for details.<p/>
+       /// </summary>
+       
+       public interface IndexDeletionPolicy
+       {
+               
+               /// <summary> <p/>This is called once when a writer is first
+               /// instantiated to give the policy a chance to remove old
+               /// commit points.<p/>
+               /// 
+               /// <p/>The writer locates all index commits present in the 
+               /// index directory and calls this method.  The policy may 
+               /// choose to delete some of the commit points, doing so by
+               /// calling method {@link IndexCommit#delete delete()} 
+               /// of {@link IndexCommit}.<p/>
+               /// 
+               /// <p/><u>Note:</u> the last CommitPoint is the most recent one,
+               /// i.e. the "front index state". Be careful not to delete it,
+               /// unless you know for sure what you are doing, and unless 
+               /// you can afford to lose the index content while doing that. 
+               /// 
+               /// </summary>
+               /// <param name="commits">List of current 
+               /// {@link IndexCommit point-in-time commits},
+               /// sorted by age (the 0th one is the oldest commit).
+               /// </param>
+               void  OnInit(System.Collections.IList commits);
+               
+               /// <summary> <p/>This is called each time the writer completes a commit.
+               /// This gives the policy a chance to remove old commit points
+               /// with each commit.<p/>
+               /// 
+               /// <p/>The policy may now choose to delete old commit points 
+               /// by calling method {@link IndexCommit#delete delete()} 
+               /// of {@link IndexCommit}.<p/>
+               /// 
+               /// <p/>If writer has <code>autoCommit = true</code> then
+               /// this method will in general be called many times during
+               /// one instance of {@link IndexWriter}.  If
+               /// <code>autoCommit = false</code> then this method is
+               /// only called once when {@link IndexWriter#close} is
+               /// called, or not at all if the {@link IndexWriter#abort}
+               /// is called. 
+               /// 
+               /// <p/><u>Note:</u> the last CommitPoint is the most recent one,
+               /// i.e. the "front index state". Be careful not to delete it,
+               /// unless you know for sure what you are doing, and unless 
+               /// you can afford to lose the index content while doing that.
+               /// 
+               /// </summary>
+               /// <param name="commits">List of {@link IndexCommit},
+               /// sorted by age (the 0th one is the oldest commit).
+               /// </param>
+               void  OnCommit(System.Collections.IList commits);
+       }
+}
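The "keep commits around for a while" use case described in the summary can be served by a small custom policy. A minimal sketch, assuming the commits list is sorted oldest-first as documented above (this class is hypothetical, not part of the commit):

```csharp
using System.Collections;
using Mono.Lucene.Net.Index;

// Keeps the newest N commit points and deletes the rest, giving slow
// readers (e.g. over NFS) a window to refresh before files disappear.
class KeepLastNCommitsPolicy : IndexDeletionPolicy
{
    private readonly int keep;

    public KeepLastNCommitsPolicy(int keep)
    {
        this.keep = keep;
    }

    public void OnInit(IList commits)
    {
        // The same pruning logic applies at startup.
        OnCommit(commits);
    }

    public void OnCommit(IList commits)
    {
        // Commits are sorted by age; the last element is the current
        // commit and must never be deleted.
        int toDelete = commits.Count - keep;
        for (int i = 0; i < toDelete; i++)
            ((IndexCommit) commits[i]).Delete();
    }
}
```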
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexFileDeleter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexFileDeleter.cs
new file mode 100644 (file)
index 0000000..09bc217
--- /dev/null
@@ -0,0 +1,829 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Directory = Mono.Lucene.Net.Store.Directory;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /*
+       * This class keeps track of each SegmentInfos instance that
+       * is still "live", either because it corresponds to a
+       * segments_N file in the Directory (a "commit", i.e. a
+       * committed SegmentInfos) or because it's an in-memory
+       * SegmentInfos that a writer is actively updating but has
+       * not yet committed.  This class uses simple reference
+       * counting to map the live SegmentInfos instances to
+       * individual files in the Directory.
+       *
+       * When autoCommit=true, IndexWriter currently commits only
+       * on completion of a merge (though this may change with
+       * time: it is not a guarantee).  When autoCommit=false,
+       * IndexWriter only commits when it is closed.  Regardless
+       * of autoCommit, the user may call IndexWriter.commit() to
+       * force a blocking commit.
+       * 
+       * The same directory file may be referenced by more than
+       * one IndexCommit, i.e. more than one SegmentInfos.
+       * Therefore we count how many commits reference each file.
+       * When all the commits referencing a certain file have been
+       * deleted, the refcount for that file becomes zero, and the
+       * file is deleted.
+       *
+       * A separate deletion policy interface
+       * (IndexDeletionPolicy) is consulted on creation (onInit)
+       * and once per commit (onCommit), to decide when a commit
+       * should be removed.
+       * 
+       * It is the business of the IndexDeletionPolicy to choose
+       * when to delete commit points.  The actual mechanics of
+       * file deletion, retrying, etc, derived from the deletion
+       * of commit points is the business of the IndexFileDeleter.
+       * 
+       * The current default deletion policy is {@link
+       * KeepOnlyLastCommitDeletionPolicy}, which removes all
+       * prior commits when a new commit has completed.  This
+       * matches the behavior before 2.2.
+       *
+       * Note that you must hold the write.lock before
+       * instantiating this class.  It opens segments_N file(s)
+       * directly with no retry logic.
+       */
+       
+       public sealed class IndexFileDeleter
+       {
+               
+               /* Files that we tried to delete but failed (likely
+               * because they are open and we are running on Windows),
+               * so we will retry them later: */
+               private System.Collections.Generic.IList<string> deletable;
+               
+               /* Reference count for all files in the index.  
+               * Counts how many existing commits reference a file.
+               * Maps String to RefCount (class below) instances: */
+               private System.Collections.Generic.Dictionary<System.String, RefCount> refCounts = new System.Collections.Generic.Dictionary<System.String, RefCount>();
+               
+               /* Holds all commits (segments_N) currently in the index.
+               * This will have just 1 commit if you are using the
+               * default delete policy (KeepOnlyLastCommitDeletionPolicy).
+               * Other policies may leave commit points live for longer
+               * in which case this list would be longer than 1: */
+               private System.Collections.ArrayList commits = new System.Collections.ArrayList();
+               
+               /* Holds files we had incref'd from the previous
+               * non-commit checkpoint: */
+        private System.Collections.Generic.IList<string> lastFiles = new System.Collections.Generic.List<string>();
+               
+               /* Commits that the IndexDeletionPolicy have decided to delete: */
+               private System.Collections.ArrayList commitsToDelete = new System.Collections.ArrayList();
+               
+               private System.IO.StreamWriter infoStream;
+               private Directory directory;
+               private IndexDeletionPolicy policy;
+               private DocumentsWriter docWriter;
+               
+               internal bool startingCommitDeleted;
+        private SegmentInfos lastSegmentInfos;
+
+        private System.Collections.Generic.Dictionary<string, string> synced;
+               
+               /// <summary>Set to true to see details of reference counts when
+               /// infoStream != null 
+               /// </summary>
+               public static bool VERBOSE_REF_COUNTS = false;
+               
+               internal void  SetInfoStream(System.IO.StreamWriter infoStream)
+               {
+                       this.infoStream = infoStream;
+                       if (infoStream != null)
+                       {
+                               Message("setInfoStream deletionPolicy=" + policy);
+                       }
+               }
+               
+               private void  Message(System.String message)
+               {
+            infoStream.WriteLine("IFD [" + new DateTime().ToString() + "; " + SupportClass.ThreadClass.Current().Name + "]: " + message);
+               }
+               
+               /// <summary> Initialize the deleter: find all previous commits in
+               /// the Directory, incref the files they reference, call
+               /// the policy to let it delete commits.  This will remove
+               /// any files not referenced by any of the commits.
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+        public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, System.IO.StreamWriter infoStream, DocumentsWriter docWriter, System.Collections.Generic.Dictionary<string, string> synced)
+               {
+                       
+                       this.docWriter = docWriter;
+                       this.infoStream = infoStream;
+            this.synced = synced;
+                       
+                       if (infoStream != null)
+                       {
+                               Message("init: current segments file is \"" + segmentInfos.GetCurrentSegmentFileName() + "\"; deletionPolicy=" + policy);
+                       }
+                       
+                       this.policy = policy;
+                       this.directory = directory;
+                       
+                       // First pass: walk the files and initialize our ref
+                       // counts:
+                       long currentGen = segmentInfos.GetGeneration();
+                       IndexFileNameFilter filter = IndexFileNameFilter.GetFilter();
+                       
+                       System.String[] files = directory.ListAll();
+                       
+                       CommitPoint currentCommitPoint = null;
+                       
+                       for (int i = 0; i < files.Length; i++)
+                       {
+                               
+                               System.String fileName = files[i];
+                               
+                               if (filter.Accept(null, fileName) && !fileName.Equals(IndexFileNames.SEGMENTS_GEN))
+                               {
+                                       
+                                       // Add this file to refCounts with initial count 0:
+                                       GetRefCount(fileName);
+                                       
+                                       if (fileName.StartsWith(IndexFileNames.SEGMENTS))
+                                       {
+                                               
+                                               // This is a commit (segments or segments_N), and
+                                               // it's valid (<= the max gen).  Load it, then
+                                               // incref all files it refers to:
+                        if (infoStream != null)
+                        {
+                            Message("init: load commit \"" + fileName + "\"");
+                        }
+                        SegmentInfos sis = new SegmentInfos();
+                        try
+                        {
+                            sis.Read(directory, fileName);
+                        }
+                        catch (System.IO.FileNotFoundException)
+                        {
+                            // LUCENE-948: on NFS (and maybe others), if
+                            // you have writers switching back and forth
+                            // between machines, it's very likely that the
+                            // dir listing will be stale and will claim a
+                            // file segments_X exists when in fact it
+                            // doesn't.  So, we catch this and handle it
+                            // as if the file does not exist
+                            if (infoStream != null)
+                            {
+                                Message("init: hit FileNotFoundException when loading commit \"" + fileName + "\"; skipping this commit point");
+                            }
+                            sis = null;
+                        }
+                        catch (System.IO.IOException)
+                        {
+                            if (SegmentInfos.GenerationFromSegmentsFileName(fileName) <= currentGen)
+                            {
+                                throw; // rethrow, preserving the original stack trace
+                            }
+                            else
+                            {
+                                // Most likely we are opening an index that
+                                // has an aborted "future" commit, so suppress
+                                // exc in this case
+                                sis = null;
+                            }
+                        }
+                        if (sis != null)
+                        {
+                            CommitPoint commitPoint = new CommitPoint(this,commitsToDelete, directory, sis);
+                            if (sis.GetGeneration() == segmentInfos.GetGeneration())
+                            {
+                                currentCommitPoint = commitPoint;
+                            }
+                            commits.Add(commitPoint);
+                            IncRef(sis, true);
+
+                            if (lastSegmentInfos == null || sis.GetGeneration() > lastSegmentInfos.GetGeneration())
+                            {
+                                lastSegmentInfos = sis;
+                            }
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       if (currentCommitPoint == null)
+                       {
+                               // We did not in fact see the segments_N file
+                               // corresponding to the segmentInfos that was passed
+                               // in.  Yet, it must exist, because our caller holds
+                               // the write lock.  This can happen when the directory
+                               // listing was stale (eg when index accessed via NFS
+                               // client with stale directory listing cache).  So we
+                               // try now to explicitly open this commit point:
+                               SegmentInfos sis = new SegmentInfos();
+                               try
+                               {
+                                       sis.Read(directory, segmentInfos.GetCurrentSegmentFileName());
+                               }
+                               catch (System.IO.IOException)
+                               {
+                                       throw new CorruptIndexException("failed to locate current segments_N file");
+                               }
+                               if (infoStream != null)
+                                       Message("forced open of current segments file " + segmentInfos.GetCurrentSegmentFileName());
+                               currentCommitPoint = new CommitPoint(this, commitsToDelete, directory, sis);
+                               commits.Add(currentCommitPoint);
+                               IncRef(sis, true);
+                       }
+                       
+                       // We keep commits list in sorted order (oldest to newest):
+                       commits.Sort();
+                       
+                       // Now delete anything with ref count at 0.  These are
+                       // presumably abandoned files eg due to crash of
+                       // IndexWriter.
+                       System.Collections.Generic.IEnumerator<System.Collections.Generic.KeyValuePair<System.String, RefCount>> it = refCounts.GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               System.String fileName = (System.String) it.Current.Key;
+                               RefCount rc = (RefCount) refCounts[fileName];
+                               if (0 == rc.count)
+                               {
+                                       if (infoStream != null)
+                                       {
+                                               Message("init: removing unreferenced file \"" + fileName + "\"");
+                                       }
+                                       DeleteFile(fileName);
+                               }
+                       }
+                       
+                       // Finally, give policy a chance to remove things on
+                       // startup:
+                       policy.OnInit(commits);
+                       
+                       // Always protect the incoming segmentInfos since
+                       // sometimes it may not be the most recent commit
+                       Checkpoint(segmentInfos, false);
+                       
+                       startingCommitDeleted = currentCommitPoint.IsDeleted();
+                       
+                       DeleteCommits();
+               }
+
+        public SegmentInfos GetLastSegmentInfos()
+        {
+            return lastSegmentInfos;
+        }
+               
+               /// <summary> Remove the CommitPoints in the commitsToDelete List by
+               /// DecRef'ing all files from each SegmentInfos.
+               /// </summary>
+               private void  DeleteCommits()
+               {
+                       
+                       int size = commitsToDelete.Count;
+                       
+                       if (size > 0)
+                       {
+                               
+                               // First decref all files that had been referred to by
+                               // the now-deleted commits:
+                               for (int i = 0; i < size; i++)
+                               {
+                                       CommitPoint commit = (CommitPoint) commitsToDelete[i];
+                                       if (infoStream != null)
+                                       {
+                                               Message("deleteCommits: now decRef commit \"" + commit.GetSegmentsFileName() + "\"");
+                                       }
+                                       System.Collections.Generic.IEnumerator<string> it = commit.files.GetEnumerator();
+                                       while (it.MoveNext())
+                                       {
+                                               DecRef(it.Current);
+                                       }
+                               }
+                               commitsToDelete.Clear();
+                               
+                               // Now compact commits to remove deleted ones (preserving the sort):
+                               size = commits.Count;
+                               int readFrom = 0;
+                               int writeTo = 0;
+                               while (readFrom < size)
+                               {
+                                       CommitPoint commit = (CommitPoint) commits[readFrom];
+                                       if (!commit.deleted)
+                                       {
+                                               if (writeTo != readFrom)
+                                               {
+                                                       commits[writeTo] = commits[readFrom];
+                                               }
+                                               writeTo++;
+                                       }
+                                       readFrom++;
+                               }
+                               
+                               while (size > writeTo)
+                               {
+                                       commits.RemoveAt(size - 1);
+                                       size--;
+                               }
+                       }
+               }
+               
+               /// <summary> Writer calls this when it has hit an error and had to
+               /// roll back, to tell us that there may now be
+               /// unreferenced files in the filesystem.  So we re-list
+               /// the filesystem and delete such files.  If segmentName
+               /// is non-null, we will only delete files corresponding to
+               /// that segment.
+               /// </summary>
+               public void  Refresh(System.String segmentName)
+               {
+                       System.String[] files = directory.ListAll();
+                       IndexFileNameFilter filter = IndexFileNameFilter.GetFilter();
+                       System.String segmentPrefix1;
+                       System.String segmentPrefix2;
+                       if (segmentName != null)
+                       {
+                               segmentPrefix1 = segmentName + ".";
+                               segmentPrefix2 = segmentName + "_";
+                       }
+                       else
+                       {
+                               segmentPrefix1 = null;
+                               segmentPrefix2 = null;
+                       }
+                       
+                       for (int i = 0; i < files.Length; i++)
+                       {
+                               System.String fileName = files[i];
+                               if (filter.Accept(null, fileName) && (segmentName == null || fileName.StartsWith(segmentPrefix1) || fileName.StartsWith(segmentPrefix2)) && !refCounts.ContainsKey(fileName) && !fileName.Equals(IndexFileNames.SEGMENTS_GEN))
+                               {
+                                       // Unreferenced file, so remove it
+                                       if (infoStream != null)
+                                       {
+                                               Message("refresh [prefix=" + segmentName + "]: removing newly created unreferenced file \"" + fileName + "\"");
+                                       }
+                                       DeleteFile(fileName);
+                               }
+                       }
+               }
+               
+               public void  Refresh()
+               {
+                       Refresh(null);
+               }
+               
+               public void  Close()
+               {
+                       // DecRef old files from the last checkpoint, if any:
+                       int size = lastFiles.Count;
+                       if (size > 0)
+                       {
+                               for (int i = 0; i < size; i++)
+                                       DecRef(lastFiles[i]);
+                               lastFiles.Clear();
+                       }
+                       
+                       DeletePendingFiles();
+               }
+               
+               private void  DeletePendingFiles()
+               {
+                       if (deletable != null)
+                       {
+                               System.Collections.Generic.IList<string> oldDeletable = deletable;
+                               deletable = null;
+                               int size = oldDeletable.Count;
+                               for (int i = 0; i < size; i++)
+                               {
+                                       if (infoStream != null)
+                                       {
+                                               Message("delete pending file " + oldDeletable[i]);
+                                       }
+                                       DeleteFile(oldDeletable[i]);
+                               }
+                       }
+               }
+               
+               /// <summary> For definition of "check point" see IndexWriter comments:
+               /// "Clarification: Check Points (and commits)".
+               /// 
+               /// Writer calls this when it has made a "consistent
+               /// change" to the index, meaning new files are written to
+               /// the index and the in-memory SegmentInfos have been
+               /// modified to point to those files.
+               /// 
+               /// This may or may not be a commit (segments_N may or may
+               /// not have been written).
+               /// 
+               /// We simply incref the files referenced by the new
+               /// SegmentInfos and decref the files we had previously
+               /// seen (if any).
+               /// 
+               /// If this is a commit, we also call the policy to give it
+               /// a chance to remove other commits.  If any commits are
+               /// removed, we decref their files as well.
+               /// </summary>
+               public void  Checkpoint(SegmentInfos segmentInfos, bool isCommit)
+               {
+                       
+                       if (infoStream != null)
+                       {
+                               Message("now checkpoint \"" + segmentInfos.GetCurrentSegmentFileName() + "\" [" + segmentInfos.Count + " segments " + "; isCommit = " + isCommit + "]");
+                       }
+                       
+                       // Try again now to delete any previously un-deletable
+                       // files (because they were in use, on Windows):
+                       DeletePendingFiles();
+                       
+                       // Incref the files:
+                       IncRef(segmentInfos, isCommit);
+                       
+                       if (isCommit)
+                       {
+                               // Append to our commits list:
+                               commits.Add(new CommitPoint(this, commitsToDelete, directory, segmentInfos));
+                               
+                               // Tell policy so it can remove commits:
+                               policy.OnCommit(commits);
+                               
+                               // Decref files for commits that were deleted by the policy:
+                               DeleteCommits();
+                       }
+                       else
+                       {
+                               
+                               System.Collections.Generic.IList<string> docWriterFiles;
+                               if (docWriter != null)
+                               {
+                                       docWriterFiles = docWriter.OpenFiles();
+                                       if (docWriterFiles != null)
+                                       // We must incRef these files before decRef'ing
+                                       // last files to make sure we don't accidentally
+                                       // delete them:
+                                               IncRef(docWriterFiles);
+                               }
+                               else
+                                       docWriterFiles = null;
+                               
+                               // DecRef old files from the last checkpoint, if any:
+                               int size = lastFiles.Count;
+                               if (size > 0)
+                               {
+                                       for (int i = 0; i < size; i++)
+                                               DecRef(lastFiles[i]);
+                                       lastFiles.Clear();
+                               }
+                               
+                               // Save files so we can decr on next checkpoint/commit:
+                foreach (string fname in segmentInfos.Files(directory, false))
+                {
+                    lastFiles.Add(fname);
+                }
+                               
+                if (docWriterFiles != null)
+                {
+                    foreach (string fname in docWriterFiles)
+                    {
+                        lastFiles.Add(fname);
+                    }
+                }
+                       }
+               }
+               
+               internal void  IncRef(SegmentInfos segmentInfos, bool isCommit)
+               {
+                       // If this is a commit point, also incRef the
+                       // segments_N file:
+                       System.Collections.Generic.IEnumerator<string> it = segmentInfos.Files(directory, isCommit).GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               IncRef(it.Current);
+                       }
+               }
+               
+               internal void  IncRef(System.Collections.Generic.IList<string> files)
+               {
+                       int size = files.Count;
+                       for (int i = 0; i < size; i++)
+                       {
+                               IncRef((System.String) files[i]);
+                       }
+               }
+               
+               internal void  IncRef(System.String fileName)
+               {
+                       RefCount rc = GetRefCount(fileName);
+                       if (infoStream != null && VERBOSE_REF_COUNTS)
+                       {
+                               Message("  IncRef \"" + fileName + "\": pre-incr count is " + rc.count);
+                       }
+                       rc.IncRef();
+               }
+               
+               internal void  DecRef(System.Collections.Generic.ICollection<string> files)
+               {
+            System.Collections.Generic.IEnumerator<string> it = files.GetEnumerator();
+            while (it.MoveNext())
+            {
+                DecRef(it.Current);
+            }
+               }
+               
+               internal void  DecRef(System.String fileName)
+               {
+                       RefCount rc = GetRefCount(fileName);
+                       if (infoStream != null && VERBOSE_REF_COUNTS)
+                       {
+                               Message("  DecRef \"" + fileName + "\": pre-decr count is " + rc.count);
+                       }
+                       if (0 == rc.DecRef())
+                       {
+                               // This file is no longer referenced by any past
+                               // commit points nor by the in-memory SegmentInfos:
+                               DeleteFile(fileName);
+                               refCounts.Remove(fileName);
+
+                if (synced != null) {
+                    lock(synced) 
+                    {
+                      synced.Remove(fileName);
+                    }
+                }
+                       }
+               }
+               
+               internal void  DecRef(SegmentInfos segmentInfos)
+               {
+                       System.Collections.Generic.IEnumerator<string> it = segmentInfos.Files(directory, false).GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               DecRef(it.Current);
+                       }
+               }
+
+        public bool Exists(String fileName)
+        {
+            if (!refCounts.ContainsKey(fileName))
+            {
+                return false;
+            }
+            else
+            {
+                return GetRefCount(fileName).count > 0;
+            }
+        }
+               
+               private RefCount GetRefCount(System.String fileName)
+               {
+                       RefCount rc;
+                       if (!refCounts.ContainsKey(fileName))
+                       {
+                               rc = new RefCount(fileName);
+                               refCounts[fileName] = rc;
+                       }
+                       else
+                       {
+                               rc = (RefCount) refCounts[fileName];
+                       }
+                       return rc;
+               }
+               
+               internal void  DeleteFiles(System.Collections.IList files)
+               {
+                       int size = files.Count;
+                       for (int i = 0; i < size; i++)
+                               DeleteFile((System.String) files[i]);
+               }
+               
+               /// <summary>Deletes the specified files, but only if they are new
+               /// (have not yet been incref'd). 
+               /// </summary>
+        internal void DeleteNewFiles(System.Collections.Generic.ICollection<string> files)
+               {
+                       System.Collections.IEnumerator it = files.GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               System.String fileName = (System.String) it.Current;
+                if (!refCounts.ContainsKey(fileName))
+                {
+                    if (infoStream != null)
+                    {
+                        Message("delete new file \"" + fileName + "\"");
+                    }
+                    DeleteFile(fileName);
+                }
+                       }
+               }
+               
+               internal void  DeleteFile(System.String fileName)
+               {
+                       try
+                       {
+                               if (infoStream != null)
+                               {
+                                       Message("delete \"" + fileName + "\"");
+                               }
+                               directory.DeleteFile(fileName);
+                       }
+                       catch (System.IO.IOException e)
+                       {
+                               // if delete fails
+                               if (directory.FileExists(fileName))
+                               {
+                                       
+                                       // Some operating systems (e.g. Windows) don't
+                                       // permit a file to be deleted while it is opened
+                                       // for read (e.g. by another process or thread). So
+                                       // we assume that when a delete fails it is because
+                                       // the file is open in another process, and queue
+                                       // the file for subsequent deletion.
+                                       
+                                       if (infoStream != null)
+                                       {
+                                               Message("IndexFileDeleter: unable to remove file \"" + fileName + "\": " + e.ToString() + "; Will re-try later.");
+                                       }
+                                       if (deletable == null)
+                                       {
+                        deletable = new System.Collections.Generic.List<string>();
+                                       }
+                                       deletable.Add(fileName); // add to deletable
+                               }
+                       }
+               }
+               
+               /// <summary> Tracks the reference count for a single index file:</summary>
+               sealed private class RefCount
+               {
+                       
+                       // fileName used only for better assert error messages
+                       internal System.String fileName;
+                       internal bool initDone;
+                       internal RefCount(System.String fileName)
+                       {
+                               this.fileName = fileName;
+                       }
+                       
+                       internal int count;
+                       
+                       public int IncRef()
+                       {
+                               if (!initDone)
+                               {
+                                       initDone = true;
+                               }
+                               else
+                               {
+                                       System.Diagnostics.Debug.Assert(count > 0, "RefCount is 0 pre-increment for file " + fileName);
+                               }
+                               return ++count;
+                       }
+                       
+                       public int DecRef()
+                       {
+                               System.Diagnostics.Debug.Assert(count > 0, "RefCount is 0 pre-decrement for file " + fileName);
+                               return --count;
+                       }
+               }
+               
+               /// <summary> Holds details for each commit point.  This class is
+               /// also passed to the deletion policy.  Note: this class
+               /// has a natural ordering that is inconsistent with
+               /// equals.
+               /// </summary>
+               
+               sealed private class CommitPoint:IndexCommit, System.IComparable
+               {
+            private void InitBlock(IndexFileDeleter enclosingInstance)
+            {
+                this.enclosingInstance = enclosingInstance;
+            }
+            private IndexFileDeleter enclosingInstance;
+            public IndexFileDeleter Enclosing_Instance
+            {
+                get
+                {
+                    return enclosingInstance;
+                }
+
+            }
+                       
+                       internal long gen;
+            internal System.Collections.Generic.ICollection<string> files;
+                       internal System.String segmentsFileName;
+                       internal bool deleted;
+                       internal Directory directory;
+                       internal System.Collections.ICollection commitsToDelete;
+                       internal long version;
+                       internal long generation;
+                       internal bool isOptimized;
+            internal System.Collections.Generic.IDictionary<string, string> userData;
+                       
+                       public CommitPoint(IndexFileDeleter enclosingInstance, System.Collections.ICollection commitsToDelete, Directory directory, SegmentInfos segmentInfos)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.directory = directory;
+                               this.commitsToDelete = commitsToDelete;
+                               userData = segmentInfos.GetUserData();
+                               segmentsFileName = segmentInfos.GetCurrentSegmentFileName();
+                               version = segmentInfos.GetVersion();
+                               generation = segmentInfos.GetGeneration();
+                files = segmentInfos.Files(directory, true);
+                               gen = segmentInfos.GetGeneration();
+                               isOptimized = segmentInfos.Count == 1 && !segmentInfos.Info(0).HasDeletions();
+                               
+                               System.Diagnostics.Debug.Assert(!segmentInfos.HasExternalSegments(directory));
+                       }
+
+            public override string ToString()
+            {
+                return "IndexFileDeleter.CommitPoint(" + segmentsFileName + ")";
+            }
+
+                       public override bool IsOptimized()
+                       {
+                               return isOptimized;
+                       }
+                       
+                       public override System.String GetSegmentsFileName()
+                       {
+                               return segmentsFileName;
+                       }
+
+            public override System.Collections.Generic.ICollection<string> GetFileNames()
+                       {
+                               return files;
+                       }
+                       
+                       public override Directory GetDirectory()
+                       {
+                               return directory;
+                       }
+                       
+                       public override long GetVersion()
+                       {
+                               return version;
+                       }
+                       
+                       public override long GetGeneration()
+                       {
+                               return generation;
+                       }
+
+            public override System.Collections.Generic.IDictionary<string, string> GetUserData()
+                       {
+                               return userData;
+                       }
+                       
+                       /// <summary> Called only by the deletion policy, to remove this
+                       /// commit point from the index.
+                       /// </summary>
+                       public override void  Delete()
+                       {
+                               if (!deleted)
+                               {
+                                       deleted = true;
+                                       Enclosing_Instance.commitsToDelete.Add(this);
+                               }
+                       }
+                       
+                       public override bool IsDeleted()
+                       {
+                               return deleted;
+                       }
+                       
+                       public int CompareTo(System.Object obj)
+                       {
+                               CommitPoint commit = (CommitPoint) obj;
+                               if (gen < commit.gen)
+                               {
+                                       return - 1;
+                               }
+                               else if (gen > commit.gen)
+                               {
+                                       return 1;
+                               }
+                               else
+                               {
+                                       return 0;
+                               }
+                       }
+               }
+       }
+}
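The reference-counting protocol implemented above is simple: each checkpoint increfs the files of the incoming SegmentInfos and decrefs the files of the previous one, and a file is physically removed exactly when its count falls back to zero. A toy model of just that invariant, for illustration only (none of this is code from the commit):

```csharp
using System;
using System.Collections.Generic;

// Toy model of IndexFileDeleter's bookkeeping: a file is deleted only
// once no commit point or in-memory SegmentInfos references it.
class RefCountingDeleter
{
    private readonly Dictionary<string, int> refCounts =
        new Dictionary<string, int>();

    public void IncRef(IEnumerable<string> files)
    {
        foreach (string f in files)
        {
            int count;
            refCounts.TryGetValue(f, out count);
            refCounts[f] = count + 1;
        }
    }

    public void DecRef(IEnumerable<string> files)
    {
        foreach (string f in files)
        {
            int count = refCounts[f] - 1;
            if (count == 0)
            {
                refCounts.Remove(f);
                // The real class calls directory.DeleteFile(f) here and
                // queues the name for retry if the OS refuses the delete.
                Console.WriteLine("delete " + f);
            }
            else
            {
                refCounts[f] = count;
            }
        }
    }
}
```

Usage mirrors Checkpoint(): IncRef the new file set before DecRef'ing the old one, so files shared by both sets never dip to zero in between.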
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexFileNameFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexFileNameFilter.cs
new file mode 100644 (file)
index 0000000..e399116
--- /dev/null
@@ -0,0 +1,110 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> Filename filter that accepts only filenames and extensions created by Lucene.
+       /// 
+       /// </summary>
+       /// <version>  $rcs = ' $Id: Exp $ ' ;
+       /// </version>
+       public class IndexFileNameFilter
+       {
+               
+               private static IndexFileNameFilter singleton = new IndexFileNameFilter();
+        private System.Collections.Hashtable extensions;
+        private System.Collections.Hashtable extensionsInCFS;
+               
+               // Prevent instantiation.
+               private IndexFileNameFilter()
+               {
+            extensions = new System.Collections.Hashtable();
+                       for (int i = 0; i < IndexFileNames.INDEX_EXTENSIONS.Length; i++)
+                       {
+                               extensions.Add(IndexFileNames.INDEX_EXTENSIONS[i], IndexFileNames.INDEX_EXTENSIONS[i]);
+                       }
+            extensionsInCFS = new System.Collections.Hashtable();
+                       for (int i = 0; i < IndexFileNames.INDEX_EXTENSIONS_IN_COMPOUND_FILE.Length; i++)
+                       {
+                               extensionsInCFS.Add(IndexFileNames.INDEX_EXTENSIONS_IN_COMPOUND_FILE[i], IndexFileNames.INDEX_EXTENSIONS_IN_COMPOUND_FILE[i]);
+                       }
+               }
+               
+               /* (non-Javadoc)
+               * @see java.io.FilenameFilter#accept(java.io.File, java.lang.String)
+               */
+               public virtual bool Accept(System.IO.FileInfo dir, System.String name)
+               {
+                       int i = name.LastIndexOf((System.Char) '.');
+                       if (i != - 1)
+                       {
+                               System.String extension = name.Substring(1 + i);
+                               if (extensions.Contains(extension))
+                               {
+                                       return true;
+                               }
+                               else if (extension.StartsWith("f") && (new System.Text.RegularExpressions.Regex("f\\d+")).Match(extension).Success)
+                               {
+                                       return true;
+                               }
+                               else if (extension.StartsWith("s") && (new System.Text.RegularExpressions.Regex("s\\d+")).Match(extension).Success)
+                               {
+                                       return true;
+                               }
+                       }
+                       else
+                       {
+                               if (name.Equals(IndexFileNames.DELETABLE))
+                                       return true;
+                               else if (name.StartsWith(IndexFileNames.SEGMENTS))
+                                       return true;
+                       }
+                       return false;
+               }
+               
+               /// <summary> Returns true if this is a file that would be contained
+               /// in a CFS file.  This method should only be called on
+               /// files that pass the above Accept check (i.e., files that are
+               /// already known to be Lucene index files).
+               /// </summary>
+               public virtual bool IsCFSFile(System.String name)
+               {
+                       int i = name.LastIndexOf((System.Char) '.');
+                       if (i != - 1)
+                       {
+                               System.String extension = name.Substring(1 + i);
+                               if (extensionsInCFS.Contains(extension))
+                               {
+                                       return true;
+                               }
+                               if (extension.StartsWith("f") && (new System.Text.RegularExpressions.Regex("f\\d+")).Match(extension).Success)
+                               {
+                                       return true;
+                               }
+                       }
+                       return false;
+               }
+               
+               public static IndexFileNameFilter GetFilter()
+               {
+                       return singleton;
+               }
+       }
+}
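For orientation, a sketch of how callers use this filter (illustrative only, not part of the patch; the directory path and file names are made up, and the FileInfo argument is only needed to satisfy the signature):

    IndexFileNameFilter filter = IndexFileNameFilter.GetFilter();
    System.IO.FileInfo dir = new System.IO.FileInfo("/tmp/index");
    filter.Accept(dir, "_0.cfs");      // true: "cfs" is a known index extension
    filter.Accept(dir, "segments_2");  // true: no extension, "segments" prefix
    filter.Accept(dir, "readme.txt");  // false: not a Lucene index file
    filter.IsCFSFile("_0.fdt");        // true: stored fields go into the compound file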
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexFileNames.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexFileNames.cs
new file mode 100644 (file)
index 0000000..8bc557f
--- /dev/null
@@ -0,0 +1,168 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> Useful constants representing filenames and extensions used by Lucene.
+       /// 
+       /// </summary>
+       /// <version>  $rcs = ' $Id: Exp $ ' ;
+       /// </version>
+       public sealed class IndexFileNames
+       {
+               
+               /// <summary>Name of the index segment file </summary>
+               public /*internal*/ const System.String SEGMENTS = "segments";
+               
+               /// <summary>Name of the generation reference file </summary>
+               public /*internal*/ const System.String SEGMENTS_GEN = "segments.gen";
+               
+               /// <summary>Name of the index deletable file (only used in
+               /// pre-lockless indices) 
+               /// </summary>
+               public /*internal*/ const System.String DELETABLE = "deletable";
+               
+               /// <summary>Extension of norms file </summary>
+               public /*internal*/ const System.String NORMS_EXTENSION = "nrm";
+               
+               /// <summary>Extension of freq postings file </summary>
+               public /*internal*/ const System.String FREQ_EXTENSION = "frq";
+               
+               /// <summary>Extension of prox postings file </summary>
+               public /*internal*/ const System.String PROX_EXTENSION = "prx";
+               
+               /// <summary>Extension of terms file </summary>
+               public /*internal*/ const System.String TERMS_EXTENSION = "tis";
+               
+               /// <summary>Extension of terms index file </summary>
+               public /*internal*/ const System.String TERMS_INDEX_EXTENSION = "tii";
+               
+               /// <summary>Extension of stored fields index file </summary>
+               public /*internal*/ const System.String FIELDS_INDEX_EXTENSION = "fdx";
+               
+               /// <summary>Extension of stored fields file </summary>
+               public /*internal*/ const System.String FIELDS_EXTENSION = "fdt";
+               
+               /// <summary>Extension of vectors fields file </summary>
+               public /*internal*/ const System.String VECTORS_FIELDS_EXTENSION = "tvf";
+               
+               /// <summary>Extension of vectors documents file </summary>
+               public /*internal*/ const System.String VECTORS_DOCUMENTS_EXTENSION = "tvd";
+               
+               /// <summary>Extension of vectors index file </summary>
+               public /*internal*/ const System.String VECTORS_INDEX_EXTENSION = "tvx";
+               
+               /// <summary>Extension of compound file </summary>
+               public /*internal*/ const System.String COMPOUND_FILE_EXTENSION = "cfs";
+               
+               /// <summary>Extension of compound file for doc store files</summary>
+               public /*internal*/ const System.String COMPOUND_FILE_STORE_EXTENSION = "cfx";
+               
+               /// <summary>Extension of deletes </summary>
+               internal const System.String DELETES_EXTENSION = "del";
+               
+               /// <summary>Extension of field infos </summary>
+               public /*internal*/ const System.String FIELD_INFOS_EXTENSION = "fnm";
+               
+               /// <summary>Extension of plain norms </summary>
+               public /*internal*/ const System.String PLAIN_NORMS_EXTENSION = "f";
+               
+               /// <summary>Extension of separate norms </summary>
+               public /*internal*/ const System.String SEPARATE_NORMS_EXTENSION = "s";
+               
+               /// <summary>Extension of gen file </summary>
+               public /*internal*/ const System.String GEN_EXTENSION = "gen";
+               
+               /// <summary> This array contains all filename extensions used by
+               /// Lucene's index files, with two exceptions, namely the
+               /// extension made up from <code>.f</code> + a number and
+               /// from <code>.s</code> + a number.  Also note that
+               /// Lucene's <code>segments_N</code> files do not have any
+               /// filename extension.
+               /// </summary>
+               public /*internal*/ static readonly System.String[] INDEX_EXTENSIONS = new System.String[]{COMPOUND_FILE_EXTENSION, FIELD_INFOS_EXTENSION, FIELDS_INDEX_EXTENSION, FIELDS_EXTENSION, TERMS_INDEX_EXTENSION, TERMS_EXTENSION, FREQ_EXTENSION, PROX_EXTENSION, DELETES_EXTENSION, VECTORS_INDEX_EXTENSION, VECTORS_DOCUMENTS_EXTENSION, VECTORS_FIELDS_EXTENSION, GEN_EXTENSION, NORMS_EXTENSION, COMPOUND_FILE_STORE_EXTENSION};
+               
+               /// <summary>File extensions that are added to a compound file
+               /// (same as above, minus "del", "gen", "cfs"). 
+               /// </summary>
+               public /*internal*/ static readonly System.String[] INDEX_EXTENSIONS_IN_COMPOUND_FILE = new System.String[]{FIELD_INFOS_EXTENSION, FIELDS_INDEX_EXTENSION, FIELDS_EXTENSION, TERMS_INDEX_EXTENSION, TERMS_EXTENSION, FREQ_EXTENSION, PROX_EXTENSION, VECTORS_INDEX_EXTENSION, VECTORS_DOCUMENTS_EXTENSION, VECTORS_FIELDS_EXTENSION, NORMS_EXTENSION};
+               
+               public /*internal*/ static readonly System.String[] STORE_INDEX_EXTENSIONS = new System.String[]{VECTORS_INDEX_EXTENSION, VECTORS_FIELDS_EXTENSION, VECTORS_DOCUMENTS_EXTENSION, FIELDS_INDEX_EXTENSION, FIELDS_EXTENSION};
+               
+               public /*internal*/ static readonly System.String[] NON_STORE_INDEX_EXTENSIONS = new System.String[]{FIELD_INFOS_EXTENSION, FREQ_EXTENSION, PROX_EXTENSION, TERMS_EXTENSION, TERMS_INDEX_EXTENSION, NORMS_EXTENSION};
+               
+               /// <summary>File extensions of old-style index files </summary>
+               public /*internal*/ static readonly System.String[] COMPOUND_EXTENSIONS = new System.String[]{FIELD_INFOS_EXTENSION, FREQ_EXTENSION, PROX_EXTENSION, FIELDS_INDEX_EXTENSION, FIELDS_EXTENSION, TERMS_INDEX_EXTENSION, TERMS_EXTENSION};
+               
+               /// <summary>File extensions for term vector support </summary>
+               public /*internal*/ static readonly System.String[] VECTOR_EXTENSIONS = new System.String[]{VECTORS_INDEX_EXTENSION, VECTORS_DOCUMENTS_EXTENSION, VECTORS_FIELDS_EXTENSION};
+               
+               /// <summary> Computes the full file name from base, extension and
+               /// generation.  If the generation is -1, the file name is
+               /// null.  If it's 0, the file name is <code>base + extension</code>.
+               /// If it's > 0, the file name is <code>base + "_" + gen + extension</code>.
+               /// </summary>
+               /// <param name="base">-- main part of the file name
+               /// </param>
+               /// <param name="extension">-- extension of the filename (including .)
+               /// </param>
+               /// <param name="gen">-- generation
+               /// </param>
+               public /*internal*/ static System.String FileNameFromGeneration(System.String base_Renamed, System.String extension, long gen)
+               {
+                       if (gen == SegmentInfo.NO)
+                       {
+                               return null;
+                       }
+                       else if (gen == SegmentInfo.WITHOUT_GEN)
+                       {
+                               return base_Renamed + extension;
+                       }
+                       else
+                       {
+#if !PRE_LUCENE_NET_2_0_0_COMPATIBLE
+                               return base_Renamed + "_" + SupportClass.Number.ToString(gen) + extension;
+#else
+                               return base_Renamed + "_" + System.Convert.ToString(gen, 16) + extension;
+#endif
+                       }
+               }
+               
+               /// <summary> Returns true if the provided filename is one of the doc
+               /// store files (ends with an extension in
+               /// STORE_INDEX_EXTENSIONS).
+               /// </summary>
+               internal static bool IsDocStoreFile(System.String fileName)
+               {
+                       if (fileName.EndsWith(COMPOUND_FILE_STORE_EXTENSION))
+                               return true;
+                       for (int i = 0; i < STORE_INDEX_EXTENSIONS.Length; i++)
+                               if (fileName.EndsWith(STORE_INDEX_EXTENSIONS[i]))
+                                       return true;
+                       return false;
+               }
+               
+               internal static System.String SegmentFileName(System.String segmentName, System.String ext)
+               {
+                       return segmentName + "." + ext;
+               }
+       }
+}
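A sketch of what these helpers produce (illustrative only; it assumes SegmentInfo.NO == -1 and SegmentInfo.WITHOUT_GEN == 0, consistent with the summary on FileNameFromGeneration, and it would have to run inside the assembly since SegmentFileName and IsDocStoreFile are internal):

    IndexFileNames.SegmentFileName("_0", IndexFileNames.FREQ_EXTENSION);  // "_0.frq"
    IndexFileNames.FileNameFromGeneration("segments", "", 5);             // "segments_5"
    IndexFileNames.FileNameFromGeneration("segments", "", 0);             // "segments"
    IndexFileNames.FileNameFromGeneration("segments", "", -1);            // null
    IndexFileNames.IsDocStoreFile("_0.fdx");                              // true ("fdx" is a doc store extension)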
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexModifier.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexModifier.cs
new file mode 100644 (file)
index 0000000..fbc74a0
--- /dev/null
@@ -0,0 +1,691 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Analyzer = Mono.Lucene.Net.Analysis.Analyzer;
+using Document = Mono.Lucene.Net.Documents.Document;
+using Directory = Mono.Lucene.Net.Store.Directory;
+using FSDirectory = Mono.Lucene.Net.Store.FSDirectory;
+using LockObtainFailedException = Mono.Lucene.Net.Store.LockObtainFailedException;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> <p/>[Note that as of <b>2.1</b>, all but one of the
+       /// methods in this class are available via {@link
+       /// IndexWriter}.  The one method that is not available is
+       /// {@link #DeleteDocument(int)}.]<p/>
+       /// 
+       /// A class to modify an index, i.e. to delete and add documents. This
+       /// class hides {@link IndexReader} and {@link IndexWriter} so that you
+       /// do not need to care about implementation details, such as the fact that
+       /// adding documents is done via IndexWriter and deletion via IndexReader.
+       /// 
+       /// <p/>Note that you cannot create more than one <code>IndexModifier</code> object
+       /// on the same directory at the same time.
+       /// 
+       /// <p/>Example usage:
+       /// 
+       /// <code>
+       /// Analyzer analyzer = new StandardAnalyzer();
+       /// // create an index in /tmp/index, overwriting an existing one:
+       /// IndexModifier indexModifier = new IndexModifier("/tmp/index", analyzer, true);
+       /// Document doc = new Document();
+       /// doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+       /// doc.add(new Field("body", "a simple test", Field.Store.YES, Field.Index.ANALYZED));
+       /// indexModifier.addDocument(doc);
+       /// int deleted = indexModifier.delete(new Term("id", "1"));
+       /// System.out.println("Deleted " + deleted + " document");
+       /// indexModifier.flush();
+       /// System.out.println(indexModifier.docCount() + " docs in index");
+       /// indexModifier.close();
+       /// </code>
+       /// 
+       /// <p/>Not all methods of IndexReader and IndexWriter are offered by this
+       /// class. If you need access to additional methods, either use those classes
+       /// directly or implement your own class that extends <code>IndexModifier</code>.
+       /// 
+       /// <p/>Although an instance of this class can be used from more than one
+       /// thread, you will not get the best performance. You might want to use
+       /// IndexReader and IndexWriter directly for that (but you will then need to
+       /// take care of synchronization yourself).
+       /// 
+       /// <p/>While you can freely mix calls to add() and delete() using this class,
+       /// you should batch your calls for best performance. For example, if you
+       /// want to update 20 documents, you should first delete all those documents,
+       /// then add all the new documents.
+       /// 
+       /// </summary>
+       /// <deprecated> Please use {@link IndexWriter} instead.
+       /// </deprecated>
+    [Obsolete("Please use IndexWriter instead.")]
+       public class IndexModifier
+       {
+               private void  InitBlock()
+               {
+                       maxBufferedDocs = IndexWriter.DEFAULT_MAX_BUFFERED_DOCS;
+                       maxFieldLength = IndexWriter.DEFAULT_MAX_FIELD_LENGTH;
+                       mergeFactor = IndexWriter.DEFAULT_MERGE_FACTOR;
+               }
+               
+               protected internal IndexWriter indexWriter = null;
+               protected internal IndexReader indexReader = null;
+               
+               protected internal Directory directory = null;
+               protected internal Analyzer analyzer = null;
+               protected internal bool open = false, closeDir = false;
+               
+               // Lucene defaults:
+               protected internal System.IO.StreamWriter infoStream = null;
+               protected internal bool useCompoundFile = true;
+               protected internal int maxBufferedDocs;
+               protected internal int maxFieldLength;
+               protected internal int mergeFactor;
+               
+               /// <summary> Open an index with write access.
+               /// 
+               /// </summary>
+               /// <param name="directory">the index directory
+               /// </param>
+               /// <param name="analyzer">the analyzer to use for adding new documents
+               /// </param>
+               /// <param name="create"><code>true</code> to create the index or overwrite the existing one;
+               /// <code>false</code> to append to the existing index
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public IndexModifier(Directory directory, Analyzer analyzer, bool create)
+               {
+                       InitBlock();
+                       Init(directory, analyzer, create);
+               }
+               
+               /// <summary> Open an index with write access.
+               /// 
+               /// </summary>
+               /// <param name="dirName">the index directory
+               /// </param>
+               /// <param name="analyzer">the analyzer to use for adding new documents
+               /// </param>
+               /// <param name="create"><code>true</code> to create the index or overwrite the existing one;
+               /// <code>false</code> to append to the existing index
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public IndexModifier(System.String dirName, Analyzer analyzer, bool create)
+               {
+                       InitBlock();
+                       Directory dir = FSDirectory.GetDirectory(dirName);
+                       this.closeDir = true;
+                       Init(dir, analyzer, create);
+               }
+               
+               /// <summary> Open an index with write access.
+               /// 
+               /// </summary>
+               /// <param name="file">the index directory
+               /// </param>
+               /// <param name="analyzer">the analyzer to use for adding new documents
+               /// </param>
+               /// <param name="create"><code>true</code> to create the index or overwrite the existing one;
+               /// <code>false</code> to append to the existing index
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public IndexModifier(System.IO.FileInfo file, Analyzer analyzer, bool create)
+               {
+                       InitBlock();
+                       Directory dir = FSDirectory.GetDirectory(file);
+                       this.closeDir = true;
+                       Init(dir, analyzer, create);
+               }
+               
+               /// <summary> Initialize an IndexWriter.</summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               protected internal virtual void  Init(Directory directory, Analyzer analyzer, bool create)
+               {
+                       this.directory = directory;
+                       lock (this.directory)
+                       {
+                               this.analyzer = analyzer;
+                               indexWriter = new IndexWriter(directory, analyzer, create, IndexWriter.MaxFieldLength.LIMITED);
+                               open = true;
+                       }
+               }
+               
+               /// <summary> Throw an IllegalStateException if the index is closed.</summary>
+               /// <throws>  IllegalStateException </throws>
+               protected internal virtual void  AssureOpen()
+               {
+                       if (!open)
+                       {
+                               throw new System.SystemException("Index is closed");
+                       }
+               }
+               
+               /// <summary> Close the IndexReader and open an IndexWriter.</summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               protected internal virtual void  CreateIndexWriter()
+               {
+                       if (indexWriter == null)
+                       {
+                               if (indexReader != null)
+                               {
+                                       indexReader.Close();
+                                       indexReader = null;
+                               }
+                               indexWriter = new IndexWriter(directory, analyzer, false, new IndexWriter.MaxFieldLength(maxFieldLength));
+                               // IndexModifier cannot use ConcurrentMergeScheduler
+                               // because it synchronizes on the directory which can
+                               // cause deadlock
+                               indexWriter.SetMergeScheduler(new SerialMergeScheduler());
+                               indexWriter.SetInfoStream(infoStream);
+                               indexWriter.SetUseCompoundFile(useCompoundFile);
+                               if (maxBufferedDocs != IndexWriter.DISABLE_AUTO_FLUSH)
+                                       indexWriter.SetMaxBufferedDocs(maxBufferedDocs);
+                               indexWriter.SetMergeFactor(mergeFactor);
+                       }
+               }
+               
+               /// <summary> Close the IndexWriter and open an IndexReader.</summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               protected internal virtual void  CreateIndexReader()
+               {
+                       if (indexReader == null)
+                       {
+                               if (indexWriter != null)
+                               {
+                                       indexWriter.Close();
+                                       indexWriter = null;
+                               }
+                               indexReader = IndexReader.Open(directory);
+                       }
+               }
+               
+               /// <summary> Make sure all changes are written to disk.</summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  Flush()
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               if (indexWriter != null)
+                               {
+                                       indexWriter.Close();
+                                       indexWriter = null;
+                                       CreateIndexWriter();
+                               }
+                               else
+                               {
+                                       indexReader.Close();
+                                       indexReader = null;
+                                       CreateIndexReader();
+                               }
+                       }
+               }
+               
+               /// <summary> Adds a document to this index, using the provided analyzer instead of the
+               /// one specified in the constructor.  If the document contains more than
+               /// {@link #SetMaxFieldLength(int)} terms for a given field, the remainder are
+               /// discarded.
+               /// </summary>
+               /// <seealso cref="IndexWriter.AddDocument(Document, Analyzer)">
+               /// </seealso>
+               /// <throws>  IllegalStateException if the index is closed </throws>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  AddDocument(Document doc, Analyzer docAnalyzer)
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               CreateIndexWriter();
+                               if (docAnalyzer != null)
+                                       indexWriter.AddDocument(doc, docAnalyzer);
+                               else
+                                       indexWriter.AddDocument(doc);
+                       }
+               }
+               
+               /// <summary> Adds a document to this index.  If the document contains more than
+               /// {@link #SetMaxFieldLength(int)} terms for a given field, the remainder are
+               /// discarded.
+               /// </summary>
+               /// <seealso cref="IndexWriter.AddDocument(Document)">
+               /// </seealso>
+               /// <throws>  IllegalStateException if the index is closed </throws>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  AddDocument(Document doc)
+               {
+                       AddDocument(doc, null);
+               }
+               
+               /// <summary> Deletes all documents containing <code>term</code>.
+               /// This is useful if one uses a document field to hold a unique ID string for
+               /// the document.  Then to delete such a document, one merely constructs a
+               /// term with the appropriate field and the unique ID string as its text and
+               /// passes it to this method.  Returns the number of documents deleted.
+               /// </summary>
+               /// <returns> the number of documents deleted
+               /// </returns>
+               /// <seealso cref="IndexReader.DeleteDocuments(Term)">
+               /// </seealso>
+               /// <throws>  IllegalStateException if the index is closed </throws>
+               /// <throws>  StaleReaderException if the index has changed </throws>
+               /// <summary>  since this reader was opened
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual int DeleteDocuments(Term term)
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               CreateIndexReader();
+                               return indexReader.DeleteDocuments(term);
+                       }
+               }
+               
+               /// <summary> Deletes the document numbered <code>docNum</code>.</summary>
+               /// <seealso cref="IndexReader.DeleteDocument(int)">
+               /// </seealso>
+               /// <throws>  StaleReaderException if the index has changed </throws>
+               /// <summary>  since this reader was opened
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IllegalStateException if the index is closed </throws>
+               public virtual void  DeleteDocument(int docNum)
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               CreateIndexReader();
+                               indexReader.DeleteDocument(docNum);
+                       }
+               }
+               
+               
+               /// <summary> Returns the number of documents currently in this
+               /// index.  If the writer is currently open, this returns
+               /// {@link IndexWriter#DocCount()}, else {@link
+               /// IndexReader#NumDocs()}.  But, note that {@link
+               /// IndexWriter#DocCount()} does not take deletions into
+               /// account, unlike {@link IndexReader#numDocs}.
+               /// </summary>
+               /// <throws>  IllegalStateException if the index is closed </throws>
+               public virtual int DocCount()
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               if (indexWriter != null)
+                               {
+                                       return indexWriter.DocCount();
+                               }
+                               else
+                               {
+                                       return indexReader.NumDocs();
+                               }
+                       }
+               }
+               
+               /// <summary> Merges all segments together into a single segment, optimizing an index
+               /// for search.
+               /// </summary>
+               /// <seealso cref="IndexWriter.Optimize()">
+               /// </seealso>
+               /// <throws>  IllegalStateException if the index is closed </throws>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  Optimize()
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               CreateIndexWriter();
+                               indexWriter.Optimize();
+                       }
+               }
+               
+               /// <summary> If non-null, information about merges and a message when
+               /// {@link #GetMaxFieldLength()} is reached will be printed to this.
+               /// <p/>Example: <tt>index.setInfoStream(System.err);</tt>
+               /// </summary>
+               /// <seealso cref="IndexWriter.SetInfoStream(PrintStream)">
+               /// </seealso>
+               /// <throws>  IllegalStateException if the index is closed </throws>
+               public virtual void  SetInfoStream(System.IO.StreamWriter infoStream)
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               if (indexWriter != null)
+                               {
+                                       indexWriter.SetInfoStream(infoStream);
+                               }
+                               this.infoStream = infoStream;
+                       }
+               }
+               
+               /// <seealso cref="IndexModifier.SetInfoStream(PrintStream)">
+               /// </seealso>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual System.IO.StreamWriter GetInfoStream()
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               CreateIndexWriter();
+                               return indexWriter.GetInfoStream();
+                       }
+               }
+               
+               /// <summary> Setting to turn on usage of a compound file. When on, multiple files
+               /// for each segment are merged into a single file once the segment creation
+               /// is finished. This is done regardless of what directory is in use.
+               /// </summary>
+               /// <seealso cref="IndexWriter.SetUseCompoundFile(boolean)">
+               /// </seealso>
+               /// <throws>  IllegalStateException if the index is closed </throws>
+               public virtual void  SetUseCompoundFile(bool useCompoundFile)
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               if (indexWriter != null)
+                               {
+                                       indexWriter.SetUseCompoundFile(useCompoundFile);
+                               }
+                               this.useCompoundFile = useCompoundFile;
+                       }
+               }
+               
+               /// <seealso cref="IndexModifier.SetUseCompoundFile(boolean)">
+               /// </seealso>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual bool GetUseCompoundFile()
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               CreateIndexWriter();
+                               return indexWriter.GetUseCompoundFile();
+                       }
+               }
+               
+               /// <summary> The maximum number of terms that will be indexed for a single field in a
+               /// document.  This limits the amount of memory required for indexing, so that
+               /// collections with very large files will not crash the indexing process by
+               /// running out of memory.<p/>
+               /// Note that this effectively truncates large documents, excluding from the
+               /// index terms that occur further in the document.  If you know your source
+               /// documents are large, be sure to set this value high enough to accommodate
+               /// the expected size.  If you set it to Integer.MAX_VALUE, then the only limit
+               /// is your memory, but you should anticipate an OutOfMemoryError.<p/>
+               /// By default, no more than 10,000 terms will be indexed for a field.
+               /// </summary>
+               /// <seealso cref="IndexWriter.SetMaxFieldLength(int)">
+               /// </seealso>
+               /// <throws>  IllegalStateException if the index is closed </throws>
+               public virtual void  SetMaxFieldLength(int maxFieldLength)
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               if (indexWriter != null)
+                               {
+                                       indexWriter.SetMaxFieldLength(maxFieldLength);
+                               }
+                               this.maxFieldLength = maxFieldLength;
+                       }
+               }
+               
+               /// <seealso cref="IndexModifier.SetMaxFieldLength(int)">
+               /// </seealso>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual int GetMaxFieldLength()
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               CreateIndexWriter();
+                               return indexWriter.GetMaxFieldLength();
+                       }
+               }
+               
+               /// <summary> Determines the minimal number of documents required before the buffered
+               /// in-memory documents are merged and a new Segment is created.
+               /// Since Documents are merged in a {@link Mono.Lucene.Net.Store.RAMDirectory},
+               /// a larger value gives faster indexing.  At the same time, mergeFactor limits
+               /// the number of files open in a FSDirectory.
+               /// 
+               /// <p/>The default value is 10.
+               /// 
+               /// </summary>
+               /// <seealso cref="IndexWriter.SetMaxBufferedDocs(int)">
+               /// </seealso>
+               /// <throws>  IllegalStateException if the index is closed </throws>
+               /// <throws>  IllegalArgumentException if maxBufferedDocs is smaller than 2 </throws>
+               public virtual void  SetMaxBufferedDocs(int maxBufferedDocs)
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               if (indexWriter != null)
+                               {
+                                       indexWriter.SetMaxBufferedDocs(maxBufferedDocs);
+                               }
+                               this.maxBufferedDocs = maxBufferedDocs;
+                       }
+               }
+               
+               /// <seealso cref="IndexModifier.SetMaxBufferedDocs(int)">
+               /// </seealso>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual int GetMaxBufferedDocs()
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               CreateIndexWriter();
+                               return indexWriter.GetMaxBufferedDocs();
+                       }
+               }
+               
+               /// <summary> Determines how often segment indices are merged by addDocument().  With
+               /// smaller values, less RAM is used while indexing, and searches on
+               /// unoptimized indices are faster, but indexing speed is slower.  With larger
+               /// values, more RAM is used during indexing, and while searches on unoptimized
+               /// indices are slower, indexing is faster.  Thus larger values (&gt; 10) are best
+               /// for batch index creation, and smaller values (&lt; 10) for indices that are
+               /// interactively maintained.
+               /// <p/>This must never be less than 2.  The default value is 10.
+               /// 
+               /// </summary>
+               /// <seealso cref="IndexWriter.SetMergeFactor(int)">
+               /// </seealso>
+               /// <throws>  IllegalStateException if the index is closed </throws>
+               public virtual void  SetMergeFactor(int mergeFactor)
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               if (indexWriter != null)
+                               {
+                                       indexWriter.SetMergeFactor(mergeFactor);
+                               }
+                               this.mergeFactor = mergeFactor;
+                       }
+               }
+               
+               /// <seealso cref="IndexModifier.SetMergeFactor(int)">
+               /// </seealso>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual int GetMergeFactor()
+               {
+                       lock (directory)
+                       {
+                               AssureOpen();
+                               CreateIndexWriter();
+                               return indexWriter.GetMergeFactor();
+                       }
+               }
+               
+               /// <summary> Close this index, writing all pending changes to disk.
+               /// 
+               /// </summary>
+               /// <throws>  IllegalStateException if the index has already been closed </throws>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  Close()
+               {
+                       lock (directory)
+                       {
+                               if (!open)
+                                       throw new System.SystemException("Index is closed already");
+                               if (indexWriter != null)
+                               {
+                                       indexWriter.Close();
+                                       indexWriter = null;
+                               }
+                               else if (indexReader != null)
+                               {
+                                       indexReader.Close();
+                                       indexReader = null;
+                               }
+                               open = false;
+                               if (closeDir)
+                               {
+                                       directory.Close();
+                               }
+                               closeDir = false;
+                       }
+               }
+               
+               public override System.String ToString()
+               {
+                       return "Index@" + directory;
+               }
+               
+               /*
+               // used as an example in the javadoc:
+               public static void main(String[] args) throws IOException {
+               Analyzer analyzer = new StandardAnalyzer();
+               // create an index in /tmp/index, overwriting an existing one:
+               IndexModifier indexModifier = new IndexModifier("/tmp/index", analyzer, true);
+               Document doc = new Document();
+               doc.add(new Fieldable("id", "1", Fieldable.Store.YES, Fieldable.Index.NOT_ANALYZED));
+               doc.add(new Fieldable("body", "a simple test", Fieldable.Store.YES, Fieldable.Index.ANALYZED));
+               indexModifier.addDocument(doc);
+               int deleted = indexModifier.delete(new Term("id", "1"));
+               System.out.println("Deleted " + deleted + " document");
+               indexModifier.flush();
+               System.out.println(indexModifier.docCount() + " docs in index");
+               indexModifier.close();
+               }*/
+       }
+}
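Decoded into C# against this port, the example from the class comment reads roughly as follows (a sketch; it assumes the usual using directives for Mono.Lucene.Net.Analysis.Standard, Mono.Lucene.Net.Documents and Mono.Lucene.Net.Index, and remember the class is marked [Obsolete] in favor of IndexWriter):

    Analyzer analyzer = new StandardAnalyzer();
    // create an index in /tmp/index, overwriting an existing one:
    IndexModifier indexModifier = new IndexModifier("/tmp/index", analyzer, true);
    Document doc = new Document();
    doc.Add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
    doc.Add(new Field("body", "a simple test", Field.Store.YES, Field.Index.ANALYZED));
    indexModifier.AddDocument(doc);
    int deleted = indexModifier.DeleteDocuments(new Term("id", "1"));
    System.Console.WriteLine("Deleted " + deleted + " document");
    indexModifier.Flush();
    System.Console.WriteLine(indexModifier.DocCount() + " docs in index");
    indexModifier.Close();

As the class comment advises, batched updates perform best: issue all DeleteDocuments calls first, then all AddDocument calls, so the modifier does not flip between its internal IndexReader and IndexWriter on every operation.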
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexReader.cs
new file mode 100644 (file)
index 0000000..395e862
--- /dev/null
@@ -0,0 +1,1846 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Document = Mono.Lucene.Net.Documents.Document;
+using FieldSelector = Mono.Lucene.Net.Documents.FieldSelector;
+using Mono.Lucene.Net.Store;
+using Similarity = Mono.Lucene.Net.Search.Similarity;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>IndexReader is an abstract class, providing an interface for accessing an
+       /// index.  Search of an index is done entirely through this abstract interface,
+       /// so that any subclass which implements it is searchable.
+       /// <p/> Concrete subclasses of IndexReader are usually constructed with a call to
+       /// one of the static <code>open()</code> methods, e.g. {@link
+       /// #Open(String, boolean)}.
+       /// <p/> For efficiency, in this API documents are often referred to via
+       /// <i>document numbers</i>, non-negative integers which each name a unique
+       /// document in the index.  These document numbers are ephemeral--they may change
+       /// as documents are added to and deleted from an index.  Clients should thus not
+       /// rely on a given document having the same number between sessions.
+       /// <p/> An IndexReader can be opened on a directory for which an IndexWriter is
+       /// opened already, but it cannot be used to delete documents from the index then.
+       /// <p/>
+       /// <b>NOTE</b>: for backwards API compatibility, several methods are not listed 
+       /// as abstract, but have no useful implementations in this base class and 
+       /// instead always throw UnsupportedOperationException.  Subclasses are 
+       /// strongly encouraged to override these methods, but in many cases may not 
+       /// need to.
+       /// <p/>
+       /// <p/>
+       /// <b>NOTE</b>: as of 2.4, it's possible to open a read-only
+       /// IndexReader using one of the static open methods that
+       /// accepts the boolean readOnly parameter.  Such a reader has
+       /// better concurrency as it's not necessary to synchronize on
+       /// the isDeleted method.  Currently the default for readOnly
+       /// is false, meaning if not specified you will get a
+       /// read/write IndexReader.  But in 3.0 this default will
+       /// change to true, meaning you must explicitly specify false
+       /// if you want to make changes with the resulting IndexReader.
+       /// <p/>
+       /// <a name="thread-safety"></a><p/><b>NOTE</b>: {@link
+       /// <code>IndexReader</code>} instances are completely thread
+       /// safe, meaning multiple threads can call any of its methods,
+       /// concurrently.  If your application requires external
+       /// synchronization, you should <b>not</b> synchronize on the
+       /// <code>IndexReader</code> instance; use your own
+       /// (non-Lucene) objects instead.
+       /// </summary>
+       /// <version>  $Id: IndexReader.java 826049 2009-10-16 19:28:55Z mikemccand $
+       /// </version>
+       public abstract class IndexReader : System.ICloneable, System.IDisposable
+       {
+               private class AnonymousClassFindSegmentsFile:SegmentInfos.FindSegmentsFile
+               {
+                       private void  InitBlock(Mono.Lucene.Net.Store.Directory directory2)
+                       {
+                               this.directory2 = directory2;
+                       }
+                       private Mono.Lucene.Net.Store.Directory directory2;
+                       internal AnonymousClassFindSegmentsFile(Mono.Lucene.Net.Store.Directory directory2, Mono.Lucene.Net.Store.Directory Param1):base(Param1)
+                       {
+                               InitBlock(directory2);
+                       }
+                       public override System.Object DoBody(System.String segmentFileName)
+                       {
+                               return (long) directory2.FileModified(segmentFileName);
+                       }
+               }
+               
+               /// <summary> Constants describing field properties, for example used for
+               /// {@link IndexReader#GetFieldNames(FieldOption)}.
+               /// </summary>
+               public sealed class FieldOption
+               {
+                       private System.String option;
+                       internal FieldOption()
+                       {
+                       }
+                       internal FieldOption(System.String option)
+                       {
+                               this.option = option;
+                       }
+                       public override System.String ToString()
+                       {
+                               return this.option;
+                       }
+                       /// <summary>All fields </summary>
+                       public static readonly FieldOption ALL = new FieldOption("ALL");
+                       /// <summary>All indexed fields </summary>
+                       public static readonly FieldOption INDEXED = new FieldOption("INDEXED");
+                       /// <summary>All fields that store payloads </summary>
+                       public static readonly FieldOption STORES_PAYLOADS = new FieldOption("STORES_PAYLOADS");
+                       /// <summary>All fields that omit tf </summary>
+                       public static readonly FieldOption OMIT_TERM_FREQ_AND_POSITIONS = new FieldOption("OMIT_TERM_FREQ_AND_POSITIONS");
+                       /// <deprecated> Renamed to {@link #OMIT_TERM_FREQ_AND_POSITIONS} 
+                       /// </deprecated>
+            [Obsolete("Renamed to OMIT_TERM_FREQ_AND_POSITIONS")]
+                       public static readonly FieldOption OMIT_TF;
+                       /// <summary>All fields which are not indexed </summary>
+                       public static readonly FieldOption UNINDEXED = new FieldOption("UNINDEXED");
+                       /// <summary>All fields which are indexed with termvectors enabled </summary>
+                       public static readonly FieldOption INDEXED_WITH_TERMVECTOR = new FieldOption("INDEXED_WITH_TERMVECTOR");
+                       /// <summary>All fields which are indexed but don't have termvectors enabled </summary>
+                       public static readonly FieldOption INDEXED_NO_TERMVECTOR = new FieldOption("INDEXED_NO_TERMVECTOR");
+                       /// <summary>All fields with termvectors enabled. Please note that only standard termvector fields are returned </summary>
+                       public static readonly FieldOption TERMVECTOR = new FieldOption("TERMVECTOR");
+                       /// <summary>All fields with termvectors with position values enabled </summary>
+                       public static readonly FieldOption TERMVECTOR_WITH_POSITION = new FieldOption("TERMVECTOR_WITH_POSITION");
+                       /// <summary>All fields with termvectors with offset values enabled </summary>
+                       public static readonly FieldOption TERMVECTOR_WITH_OFFSET = new FieldOption("TERMVECTOR_WITH_OFFSET");
+                       /// <summary>All fields with termvectors with offset values and position values enabled </summary>
+                       public static readonly FieldOption TERMVECTOR_WITH_POSITION_OFFSET = new FieldOption("TERMVECTOR_WITH_POSITION_OFFSET");
+                       static FieldOption()
+                       {
+                               OMIT_TF = OMIT_TERM_FREQ_AND_POSITIONS;
+                       }
+               }
+               
+               private bool closed;
+               protected internal bool hasChanges;
+               
+               private int refCount;
+               
+               internal static int DEFAULT_TERMS_INDEX_DIVISOR = 1;
+               
+               private bool disableFakeNorms = false;
+               
+               /// <summary>Expert: returns the current refCount for this reader </summary>
+               public virtual int GetRefCount()
+               {
+                       lock (this)
+                       {
+                               return refCount;
+                       }
+               }
+               
+               /// <summary> Expert: increments the refCount of this IndexReader
+               /// instance.  RefCounts are used to determine when a
+               /// reader can be closed safely, i.e. as soon as there are
+               /// no more references.  Be sure to always call a
+               /// corresponding {@link #decRef} in a finally clause;
+               /// otherwise the reader may never be closed.  Note that
+               /// {@link #close} simply calls decRef(), which means that
+               /// the IndexReader will not really be closed until {@link
+               /// #decRef} has been called for all outstanding
+               /// references.
+               /// 
+               /// </summary>
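+               /// <example>
+               /// A minimal sketch of the pairing described above (assumes an
+               /// already-open <code>reader</code>):
+               /// <code>
+               /// reader.IncRef();
+               /// try
+               /// {
+               ///     // ... use the reader ...
+               /// }
+               /// finally
+               /// {
+               ///     reader.DecRef(); // always release, even on exceptions
+               /// }
+               /// </code>
+               /// </example>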
+               /// <seealso cref="decRef">
+               /// </seealso>
+               public virtual void  IncRef()
+               {
+                       lock (this)
+                       {
+                               System.Diagnostics.Debug.Assert(refCount > 0);
+                               EnsureOpen();
+                               refCount++;
+                       }
+               }
+               
+               /// <summary> Expert: decreases the refCount of this IndexReader
+               /// instance.  If the refCount drops to 0, then pending
+               /// changes (if any) are committed to the index and this
+               /// reader is closed.
+               /// 
+               /// </summary>
+               /// <throws>  IOException if an I/O error occurs in Commit() or DoClose() </throws>
+               /// <seealso cref="incRef">
+               /// </seealso>
+               public virtual void  DecRef()
+               {
+                       lock (this)
+                       {
+                               System.Diagnostics.Debug.Assert(refCount > 0);
+                               EnsureOpen();
+                               if (refCount == 1)
+                               {
+                                       Commit();
+                                       DoClose();
+                               }
+                               refCount--;
+                       }
+               }
+               
+               /// <deprecated> will be deleted when IndexReader(Directory) is deleted
+               /// </deprecated>
+               /// <seealso cref="Directory()">
+               /// </seealso>
+        [Obsolete("will be deleted when IndexReader(Directory) is deleted")]
+               private Directory directory;
+               
+               /// <summary> Legacy constructor for backwards compatibility.
+               /// 
+               /// <p/>
+               /// This constructor should not be used; it exists only for backwards 
+               /// compatibility, to support legacy subclasses that did not "own" 
+               /// a specific directory but needed to specify something to be returned 
+               /// by the directory() method.  Future subclasses should delegate to the 
+               /// no-arg constructor and implement the directory() method as appropriate.
+               /// 
+               /// </summary>
+               /// <param name="directory">Directory to be returned by the directory() method
+               /// </param>
+               /// <seealso cref="Directory()">
+               /// </seealso>
+               /// <deprecated> Use IndexReader() instead.
+               /// </deprecated>
+        [Obsolete("Use IndexReader() instead.")]
+               protected internal IndexReader(Directory directory):this()
+               {
+                       this.directory = directory;
+               }
+               
+               protected internal IndexReader()
+               {
+                       refCount = 1;
+               }
+               
+               /// <throws>  AlreadyClosedException if this IndexReader is closed </throws>
+               protected internal void  EnsureOpen()
+               {
+                       if (refCount <= 0)
+                       {
+                               throw new AlreadyClosedException("this IndexReader is closed");
+                       }
+               }
+               
+               /// <summary>Returns a read/write IndexReader reading the index in an FSDirectory in the named
+               /// path.
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <deprecated> Use {@link #Open(Directory, boolean)} instead. 
+               /// This method will be removed in the 3.0 release.
+               /// 
+               /// </deprecated>
+               /// <param name="path">the path to the index directory 
+               /// </param>
+        [Obsolete("Use Open(Directory, boolean) instead. This method will be removed in the 3.0 release.")]
+               public static IndexReader Open(System.String path)
+               {
+                       return Open(path, false);
+               }
+               
+               /// <summary>Returns an IndexReader reading the index in an
+               /// FSDirectory in the named path.  You should pass
+               /// readOnly=true, since it gives much better concurrent
+               /// performance, unless you intend to do write operations
+               /// (delete documents or change norms) with the reader.
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <param name="path">the path to the index directory
+               /// </param>
+               /// <param name="readOnly">true if this should be a readOnly
+               /// reader
+               /// </param>
+               /// <deprecated> Use {@link #Open(Directory, boolean)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// 
+               /// </deprecated>
+        [Obsolete("Use Open(Directory, bool) instead. This method will be removed in the 3.0 release.")]
+               public static IndexReader Open(System.String path, bool readOnly)
+               {
+                       Directory dir = FSDirectory.GetDirectory(path);
+                       IndexReader r = null;
+                       try
+                       {
+                               r = Open(dir, null, null, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
+                       }
+                       finally
+                       {
+                               if (r == null)
+                                       dir.Close();
+                       }
+                       return new DirectoryOwningReader(r);
+               }
+               
+               /// <summary>Returns a read/write IndexReader reading the index in an FSDirectory in the named
+               /// path.
+               /// </summary>
+               /// <param name="path">the path to the index directory
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <deprecated> Use {@link #Open(Directory, boolean)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// 
+               /// </deprecated>
+        [Obsolete("Use Open(Directory, bool) instead.This method will be removed in the 3.0 release.")]
+               public static IndexReader Open(System.IO.FileInfo path)
+               {
+                       return Open(path, false);
+               }
+               
+               /// <summary>Returns an IndexReader reading the index in an
+               /// FSDirectory in the named path.  You should pass
+               /// readOnly=true, since it gives much better concurrent
+               /// performance, unless you intend to do write operations
+               /// (delete documents or change norms) with the reader.
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <param name="path">the path to the index directory
+               /// </param>
+               /// <param name="readOnly">true if this should be a readOnly
+               /// reader
+               /// </param>
+               /// <deprecated> Use {@link #Open(Directory, boolean)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// 
+               /// </deprecated>
+        [Obsolete("Use Open(Directory, bool) instead. This method will be removed in the 3.0 release.")]
+               public static IndexReader Open(System.IO.FileInfo path, bool readOnly)
+               {
+                       Directory dir = FSDirectory.GetDirectory(path);
+                       IndexReader r = null;
+                       try
+                       {
+                               r = Open(dir, null, null, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
+                       }
+                       finally
+                       {
+                               if (r == null)
+                                       dir.Close();
+                       }
+                       return new DirectoryOwningReader(r);
+               }
+               
+               /// <summary>Returns a read/write IndexReader reading the index in
+               /// the given Directory.
+               /// </summary>
+               /// <param name="directory">the index directory
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <deprecated> Use {@link #Open(Directory, boolean)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// 
+               /// </deprecated>
+        [Obsolete("Use Open(Directory, bool) instead. This method will be removed in the 3.0 release.")]
+               public static IndexReader Open(Directory directory)
+               {
+                       return Open(directory, null, null, false, DEFAULT_TERMS_INDEX_DIVISOR);
+               }
+               
+               /// <summary>Returns an IndexReader reading the index in the given
+               /// Directory.  You should pass readOnly=true, since it
+               /// gives much better concurrent performance, unless you
+               /// intend to do write operations (delete documents or
+               /// change norms) with the reader.
+               /// </summary>
+               /// <param name="directory">the index directory
+               /// </param>
+               /// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
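+               /// <example>
+               /// A minimal sketch (the index path is illustrative only):
+               /// <code>
+               /// Directory dir = FSDirectory.GetDirectory("/path/to/index");
+               /// IndexReader reader = IndexReader.Open(dir, true); // read-only for better concurrency
+               /// </code>
+               /// </example>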
+               public static IndexReader Open(Directory directory, bool readOnly)
+               {
+                       return Open(directory, null, null, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
+               }
+               
+               /// <summary>Expert: returns a read/write IndexReader reading the index in the given
+               /// {@link IndexCommit}.
+               /// </summary>
+               /// <param name="commit">the commit point to open
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <deprecated> Use {@link #Open(IndexCommit, boolean)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// 
+               /// </deprecated>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+        [Obsolete("Use Open(IndexCommit, bool) instead. This method will be removed in the 3.0 release.")]
+               public static IndexReader Open(IndexCommit commit)
+               {
+                       return Open(commit.GetDirectory(), null, commit, false, DEFAULT_TERMS_INDEX_DIVISOR);
+               }
+               
+               /// <summary>Expert: returns an IndexReader reading the index in the given
+               /// {@link IndexCommit}.  You should pass readOnly=true, since it
+               /// gives much better concurrent performance, unless you
+               /// intend to do write operations (delete documents or
+               /// change norms) with the reader.
+               /// </summary>
+               /// <param name="commit">the commit point to open
+               /// </param>
+               /// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public static IndexReader Open(IndexCommit commit, bool readOnly)
+               {
+                       return Open(commit.GetDirectory(), null, commit, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
+               }
+               
+               /// <summary>Expert: returns a read/write IndexReader reading the index in the given
+               /// Directory, with a custom {@link IndexDeletionPolicy}.
+               /// </summary>
+               /// <param name="directory">the index directory
+               /// </param>
+               /// <param name="deletionPolicy">a custom deletion policy (only used
+               /// if you use this reader to perform deletes or to set
+               /// norms); see {@link IndexWriter} for details.
+               /// </param>
+               /// <deprecated> Use {@link #Open(Directory, IndexDeletionPolicy, boolean)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// 
+               /// </deprecated>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+        [Obsolete("Use Open(Directory, IndexDeletionPolicy, bool) instead. This method will be removed in the 3.0 release.")]
+               public static IndexReader Open(Directory directory, IndexDeletionPolicy deletionPolicy)
+               {
+                       return Open(directory, deletionPolicy, null, false, DEFAULT_TERMS_INDEX_DIVISOR);
+               }
+               
+               /// <summary>Expert: returns an IndexReader reading the index in
+               /// the given Directory, with a custom {@link
+               /// IndexDeletionPolicy}.  You should pass readOnly=true,
+               /// since it gives much better concurrent performance,
+               /// unless you intend to do write operations (delete
+               /// documents or change norms) with the reader.
+               /// </summary>
+               /// <param name="directory">the index directory
+               /// </param>
+               /// <param name="deletionPolicy">a custom deletion policy (only used
+               /// if you use this reader to perform deletes or to set
+               /// norms); see {@link IndexWriter} for details.
+               /// </param>
+               /// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public static IndexReader Open(Directory directory, IndexDeletionPolicy deletionPolicy, bool readOnly)
+               {
+                       return Open(directory, deletionPolicy, null, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
+               }
+               
+               /// <summary>Expert: returns an IndexReader reading the index in
+               /// the given Directory, with a custom {@link
+               /// IndexDeletionPolicy}.  You should pass readOnly=true,
+               /// since it gives much better concurrent performance,
+               /// unless you intend to do write operations (delete
+               /// documents or change norms) with the reader.
+               /// </summary>
+               /// <param name="directory">the index directory
+               /// </param>
+               /// <param name="deletionPolicy">a custom deletion policy (only used
+               /// if you use this reader to perform deletes or to set
+               /// norms); see {@link IndexWriter} for details.
+               /// </param>
+               /// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
+               /// </param>
+               /// <param name="termInfosIndexDivisor">Subsamples which indexed
+               /// terms are loaded into RAM. This has the same effect as {@link
+               /// IndexWriter#setTermIndexInterval} except that setting
+               /// must be done at indexing time while this setting can be
+               /// set per reader.  When set to N, then one in every
+               /// N*termIndexInterval terms in the index is loaded into
+               /// memory.  By setting this to a value > 1 you can reduce
+               /// memory usage, at the expense of higher latency when
+               /// loading a TermInfo.  The default value is 1.  Set this
+               /// to -1 to skip loading the terms index entirely.
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
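+               /// <example>
+               /// A hedged sketch (<code>dir</code> is assumed): with Lucene's default
+               /// termIndexInterval of 128, a divisor of 4 loads one in every
+               /// 4*128 = 512 indexed terms into memory:
+               /// <code>
+               /// IndexReader reader = IndexReader.Open(dir, null, true, 4);
+               /// </code>
+               /// </example>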
+               public static IndexReader Open(Directory directory, IndexDeletionPolicy deletionPolicy, bool readOnly, int termInfosIndexDivisor)
+               {
+                       return Open(directory, deletionPolicy, null, readOnly, termInfosIndexDivisor);
+               }
+               
+               /// <summary>Expert: returns a read/write IndexReader reading the index in the given
+               /// Directory, using a specific commit and with a custom
+               /// {@link IndexDeletionPolicy}.
+               /// </summary>
+               /// <param name="commit">the specific {@link IndexCommit} to open;
+               /// see {@link IndexReader#listCommits} to list all commits
+               /// in a directory
+               /// </param>
+               /// <param name="deletionPolicy">a custom deletion policy (only used
+               /// if you use this reader to perform deletes or to set
+               /// norms); see {@link IndexWriter} for details.
+               /// </param>
+               /// <deprecated> Use {@link #Open(IndexCommit, IndexDeletionPolicy, boolean)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// 
+               /// </deprecated>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+        [Obsolete("Use Open(IndexCommit, IndexDeletionPolicy, bool) instead. This method will be removed in the 3.0 release.")]
+               public static IndexReader Open(IndexCommit commit, IndexDeletionPolicy deletionPolicy)
+               {
+                       return Open(commit.GetDirectory(), deletionPolicy, commit, false, DEFAULT_TERMS_INDEX_DIVISOR);
+               }
+               
+               /// <summary>Expert: returns an IndexReader reading the index in
+               /// the given Directory, using a specific commit and with
+               /// a custom {@link IndexDeletionPolicy}.  You should pass
+               /// readOnly=true, since it gives much better concurrent
+               /// performance, unless you intend to do write operations
+               /// (delete documents or change norms) with the reader.
+               /// </summary>
+               /// <param name="commit">the specific {@link IndexCommit} to open;
+               /// see {@link IndexReader#listCommits} to list all commits
+               /// in a directory
+               /// </param>
+               /// <param name="deletionPolicy">a custom deletion policy (only used
+               /// if you use this reader to perform deletes or to set
+               /// norms); see {@link IndexWriter} for details.
+               /// </param>
+               /// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public static IndexReader Open(IndexCommit commit, IndexDeletionPolicy deletionPolicy, bool readOnly)
+               {
+                       return Open(commit.GetDirectory(), deletionPolicy, commit, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
+               }
+               
+               /// <summary>Expert: returns an IndexReader reading the index in
+               /// the given Directory, using a specific commit and with
+               /// a custom {@link IndexDeletionPolicy}.  You should pass
+               /// readOnly=true, since it gives much better concurrent
+               /// performance, unless you intend to do write operations
+               /// (delete documents or change norms) with the reader.
+               /// </summary>
+               /// <param name="commit">the specific {@link IndexCommit} to open;
+               /// see {@link IndexReader#listCommits} to list all commits
+               /// in a directory
+               /// </param>
+               /// <param name="deletionPolicy">a custom deletion policy (only used
+               /// if you use this reader to perform deletes or to set
+               /// norms); see {@link IndexWriter} for details.
+               /// </param>
+               /// <param name="readOnly">true if no changes (deletions, norms) will be made with this IndexReader
+               /// </param>
+               /// <param name="termInfosIndexDivisor">Subsambles which indexed
+               /// terms are loaded into RAM. This has the same effect as {@link
+               /// IndexWriter#setTermIndexInterval} except that setting
+               /// must be done at indexing time while this setting can be
+               /// set per reader.  When set to N, then one in every
+               /// N*termIndexInterval terms in the index is loaded into
+               /// memory.  By setting this to a value > 1 you can reduce
+               /// memory usage, at the expense of higher latency when
+               /// loading a TermInfo.  The default value is 1.  Set this
+               /// to -1 to skip loading the terms index entirely.
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public static IndexReader Open(IndexCommit commit, IndexDeletionPolicy deletionPolicy, bool readOnly, int termInfosIndexDivisor)
+               {
+                       return Open(commit.GetDirectory(), deletionPolicy, commit, readOnly, termInfosIndexDivisor);
+               }
+               
+               private static IndexReader Open(Directory directory, IndexDeletionPolicy deletionPolicy, IndexCommit commit, bool readOnly, int termInfosIndexDivisor)
+               {
+                       return DirectoryReader.Open(directory, deletionPolicy, commit, readOnly, termInfosIndexDivisor);
+               }
+               
+               /// <summary> Refreshes an IndexReader if the index has changed since this instance 
+               /// was (re)opened. 
+               /// <p/>
+               /// Opening an IndexReader is an expensive operation. This method can be used
+               /// to refresh an existing IndexReader to reduce these costs. This method 
+               /// tries to only load segments that have changed or were created after the 
+               /// IndexReader was (re)opened.
+               /// <p/>
+               /// If the index has not changed since this instance was (re)opened, then this
+               /// call is a NOOP and returns this instance. Otherwise, a new instance is 
+               /// returned. The old instance is <b>not</b> closed and remains usable.<br/>
+               /// <p/>   
+               /// If the reader is reopened, even though they share
+               /// resources internally, it's safe to make changes
+               /// (deletions, norms) with the new reader.  All shared
+               /// mutable state obeys "copy on write" semantics to ensure
+               /// the changes are not seen by other readers.
+               /// <p/>
+               /// You can determine whether a reader was actually reopened by comparing the
+               /// old instance with the instance returned by this method: 
+               /// <pre>
+               /// IndexReader reader = ... 
+               /// ...
+               /// IndexReader newReader = reader.Reopen();
+               /// if (newReader != reader) {
+               ///     ...  // reader was reopened
+               ///     reader.Close(); 
+               /// }
+               /// reader = newReader;
+               /// ...
+               /// </pre>
+               /// 
+               /// Be sure to synchronize that code so that other threads,
+               /// if present, can never use reader after it has been
+               /// closed and before it's switched to newReader.
+               /// 
+               /// <p/><b>NOTE</b>: If this reader is a near real-time
+               /// reader (obtained from {@link IndexWriter#GetReader()}),
+               /// Reopen() will simply call writer.GetReader() again for
+               /// you, though this may change in the future.
+               /// 
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual IndexReader Reopen()
+               {
+                       lock (this)
+                       {
+                               throw new System.NotSupportedException("This reader does not support reopen().");
+                       }
+               }
+               
+               
+               /// <summary>Just like {@link #Reopen()}, except you can change the
+               /// readOnly mode of the original reader.  If the index is
+               /// unchanged but readOnly differs, a new reader
+               /// will be returned. 
+               /// </summary>
+               public virtual IndexReader Reopen(bool openReadOnly)
+               {
+                       lock (this)
+                       {
+                               throw new System.NotSupportedException("This reader does not support reopen().");
+                       }
+               }
+               
+               /// <summary>Expert: reopen this reader on a specific commit point.
+               /// This always returns a readOnly reader.  If the
+               /// specified commit point matches what this reader is
+               /// already on, and this reader is already readOnly, then
+               /// this same instance is returned; if it is not already
+               /// readOnly, a readOnly clone is returned. 
+               /// </summary>
+               public virtual IndexReader Reopen(IndexCommit commit)
+               {
+                       lock (this)
+                       {
+                               throw new System.NotSupportedException("This reader does not support reopen(IndexCommit).");
+                       }
+               }
+               
+               /// <summary> Efficiently clones the IndexReader (sharing most
+               /// internal state).
+               /// <p/>
+               /// On cloning a reader with pending changes (deletions,
+               /// norms), the original reader transfers its write lock to
+               /// the cloned reader.  This means only the cloned reader
+               /// may make further changes to the index, and commit the
+               /// changes to the index on close, but the old reader still
+               /// reflects all changes made up until it was cloned.
+               /// <p/>
+               /// Like {@link #Reopen()}, it's safe to make changes to
+               /// either the original or the cloned reader: all shared
+               /// mutable state obeys "copy on write" semantics to ensure
+               /// the changes are not seen by other readers.
+               /// <p/>
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual System.Object Clone()
+               {
+                       throw new System.NotSupportedException("This reader does not implement clone()");
+               }
+               
+               /// <summary> Clones the IndexReader and optionally changes readOnly.  A readOnly 
+               /// reader cannot open a writeable reader.  
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual IndexReader Clone(bool openReadOnly)
+               {
+                       lock (this)
+                       {
+                               throw new System.NotSupportedException("This reader does not implement clone()");
+                       }
+               }
+               
+               /// <summary> Returns the directory associated with this index.  The default 
+               /// implementation returns the directory specified by subclasses when 
+               /// delegating to the IndexReader(Directory) constructor, or throws an 
+               /// UnsupportedOperationException if one was not specified.
+               /// </summary>
+               /// <throws>  UnsupportedOperationException if no directory </throws>
+               public virtual Directory Directory()
+               {
+                       EnsureOpen();
+                       if (null != directory)
+                       {
+                               return directory;
+                       }
+                       else
+                       {
+                               throw new System.NotSupportedException("This reader does not support this method.");
+                       }
+               }
+               
+               /// <summary> Returns the time the index in the named directory was last modified.
+               /// Do not use this to check whether the reader is still up-to-date, use
+               /// {@link #IsCurrent()} instead. 
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <deprecated> Use {@link #LastModified(Directory)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// </deprecated>
+        [Obsolete("Use LastModified(Directory) instead. This method will be removed in the 3.0 release.")]
+               public static long LastModified(System.String directory)
+               {
+                       return LastModified(new System.IO.FileInfo(directory));
+               }
+               
+               /// <summary> Returns the time the index in the named directory was last modified. 
+               /// Do not use this to check whether the reader is still up-to-date, use
+               /// {@link #IsCurrent()} instead. 
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <deprecated> Use {@link #LastModified(Directory)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// 
+               /// </deprecated>
+        [Obsolete("Use LastModified(Directory) instead. This method will be removed in the 3.0 release.")]
+               public static long LastModified(System.IO.FileInfo fileDirectory)
+               {
+                       Directory dir = FSDirectory.GetDirectory(fileDirectory); // use new static method here
+                       try
+                       {
+                               return LastModified(dir);
+                       }
+                       finally
+                       {
+                               dir.Close();
+                       }
+               }
+               
+               /// <summary> Returns the time the index in the named directory was last modified. 
+               /// Do not use this to check whether the reader is still up-to-date, use
+               /// {@link #IsCurrent()} instead. 
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public static long LastModified(Directory directory2)
+               {
+                       return (long) ((System.Int64) new AnonymousClassFindSegmentsFile(directory2, directory2).Run());
+               }
+               
+               /// <summary> Reads version number from segments files. The version number is
+               /// initialized with a timestamp and then increased by one for each change of
+               /// the index.
+               /// 
+               /// </summary>
+               /// <param name="directory">where the index resides.
+               /// </param>
+               /// <returns> version number.
+               /// </returns>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <deprecated> Use {@link #GetCurrentVersion(Directory)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// </deprecated>
+        [Obsolete("Use GetCurrentVersion(Directory) instead. This method will be removed in the 3.0 release.")]
+               public static long GetCurrentVersion(System.String directory)
+               {
+                       return GetCurrentVersion(new System.IO.FileInfo(directory));
+               }
+               
+               /// <summary> Reads version number from segments files. The version number is
+               /// initialized with a timestamp and then increased by one for each change of
+               /// the index.
+               /// 
+               /// </summary>
+               /// <param name="directory">where the index resides.
+               /// </param>
+               /// <returns> version number.
+               /// </returns>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <deprecated> Use {@link #GetCurrentVersion(Directory)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// </deprecated>
+        [Obsolete("Use GetCurrentVersion(Directory) instead. This method will be removed in the 3.0 release.")]
+               public static long GetCurrentVersion(System.IO.FileInfo directory)
+               {
+                       Directory dir = FSDirectory.GetDirectory(directory);
+                       try
+                       {
+                               return GetCurrentVersion(dir);
+                       }
+                       finally
+                       {
+                               dir.Close();
+                       }
+               }
+               
+               /// <summary> Reads version number from segments files. The version number is
+               /// initialized with a timestamp and then increased by one for each change of
+               /// the index.
+               /// 
+               /// </summary>
+               /// <param name="directory">where the index resides.
+               /// </param>
+               /// <returns> version number.
+               /// </returns>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public static long GetCurrentVersion(Directory directory)
+               {
+                       return SegmentInfos.ReadCurrentVersion(directory);
+               }
+               
+               /// <summary> Reads commitUserData, previously passed to {@link
+               /// IndexWriter#Commit(Map)}, from the current index
+               /// segments file.  This will return null if {@link
+               /// IndexWriter#Commit(Map)} has never been called for
+               /// this index.
+               /// 
+               /// </summary>
+               /// <param name="directory">where the index resides.
+               /// </param>
+               /// <returns> commit userData.
+               /// </returns>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <seealso cref="GetCommitUserData()">
+               /// </seealso>
+        public static System.Collections.Generic.IDictionary<string, string> GetCommitUserData(Directory directory)
+               {
+                       return SegmentInfos.ReadCurrentUserData(directory);
+               }
+               
+               /// <summary> Version number when this IndexReader was opened. Not implemented in the
+               /// IndexReader base class.
+               /// 
+               /// <p/>
+               /// If this reader is based on a Directory (i.e., was created by calling
+               /// {@link #Open}, or {@link #Reopen} on a reader based on a Directory), then
+               /// this method returns the version recorded in the commit that the reader
+               /// opened. This version is advanced every time {@link IndexWriter#Commit} is
+               /// called.
+               /// <p/>
+               /// 
+               /// <p/>
+               /// If instead this reader is a near real-time reader (i.e., obtained by a call
+               /// to {@link IndexWriter#GetReader}, or by calling {@link #Reopen} on a near
+               /// real-time reader), then this method returns the version of the last
+               /// commit done by the writer. Note that even as further changes are made
+               /// with the writer, the version will not change until a commit is
+               /// completed. Thus, you should not rely on this method to determine when a
+               /// near real-time reader should be opened. Use {@link #IsCurrent} instead.
+               /// <p/>
+               /// 
+               /// </summary>
+               /// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
+               public virtual long GetVersion()
+               {
+                       throw new System.NotSupportedException("This reader does not support this method.");
+               }
+               
+               /// <summary> Retrieve the String userData optionally passed to
+               /// IndexWriter#commit.  This will return null if {@link
+               /// IndexWriter#Commit(Map)} has never been called for
+               /// this index.
+               /// 
+               /// </summary>
+               /// <seealso cref="GetCommitUserData(Directory)">
+               /// </seealso>
+        public virtual System.Collections.Generic.IDictionary<string, string> GetCommitUserData()
+               {
+                       throw new System.NotSupportedException("This reader does not support this method.");
+               }
+               
+               /// <summary><p/>For IndexReader implementations that use
+               /// TermInfosReader to read terms, this sets the
+               /// indexDivisor to subsample the number of indexed terms
+               /// loaded into memory.  This has the same effect as {@link
+               /// IndexWriter#setTermIndexInterval} except that setting
+               /// must be done at indexing time while this setting can be
+               /// set per reader.  When set to N, then one in every
+               /// N*termIndexInterval terms in the index is loaded into
+               /// memory.  By setting this to a value > 1 you can reduce
+               /// memory usage, at the expense of higher latency when
+               /// loading a TermInfo.  The default value is 1.<p/>
+               /// 
+               /// <b>NOTE:</b> you must call this before the term
+               /// index is loaded.  If the index is already loaded, 
+               /// an IllegalStateException is thrown.
+               /// </summary>
+               /// <throws>  IllegalStateException if the term index has already been loaded into memory </throws>
+               /// <deprecated> Please use {@link IndexReader#Open(Directory, IndexDeletionPolicy, boolean, int)} to specify the required TermInfos index divisor instead.
+               /// </deprecated>
+        [Obsolete("Please use IndexReader.Open(Directory, IndexDeletionPolicy, bool, int) to specify the required TermInfos index divisor instead.")]
+               public virtual void  SetTermInfosIndexDivisor(int indexDivisor)
+               {
+                       throw new System.NotSupportedException("Please pass termInfosIndexDivisor up-front when opening IndexReader");
+               }
+               
+               /// <summary><p/>For IndexReader implementations that use
+               /// TermInfosReader to read terms, this returns the
+               /// current indexDivisor as specified when the reader was
+               /// opened.
+               /// </summary>
+               public virtual int GetTermInfosIndexDivisor()
+               {
+                       throw new System.NotSupportedException("This reader does not support this method.");
+               }
+               
+               /// <summary> Check whether any new changes have occurred to the index since this
+               /// reader was opened.
+               /// 
+               /// <p/>
+               /// If this reader is based on a Directory (i.e., was created by calling
+               /// {@link #open}, or {@link #reopen} on a reader based on a Directory), then
+               /// this method checks if any further commits (see {@link IndexWriter#commit})
+               /// have occurred in that directory.
+               /// <p/>
+               /// 
+               /// <p/>
+               /// If instead this reader is a near real-time reader (i.e., obtained by a call
+               /// to {@link IndexWriter#getReader}, or by calling {@link #reopen} on a near
+               /// real-time reader), then this method checks if either a new commit has
+               /// occurred, or any new uncommitted changes have taken place via the writer.
+               /// Note that even if the writer has only performed merging, this method will
+               /// still return false.
+               /// <p/>
+               /// 
+               /// <p/>
+               /// In any event, if this returns false, you should call {@link #reopen} to
+               /// get a new reader that sees the changes.
+               /// <p/>
+               /// 
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
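+               /// <example>
+               /// A minimal sketch of the refresh idiom described above (assumes an
+               /// open <code>reader</code>):
+               /// <code>
+               /// if (!reader.IsCurrent())
+               /// {
+               ///     IndexReader newReader = reader.Reopen();
+               ///     if (newReader != reader)
+               ///     {
+               ///         reader.Close();
+               ///         reader = newReader;
+               ///     }
+               /// }
+               /// </code>
+               /// </example>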
+               public virtual bool IsCurrent()
+               {
+                       throw new System.NotSupportedException("This reader does not support this method.");
+               }
+               
+               /// <summary> Checks if the index is optimized (i.e., has a single segment and 
+               /// no deletions).  Not implemented in the IndexReader base class.
+               /// </summary>
+               /// <returns> <code>true</code> if the index is optimized; <code>false</code> otherwise
+               /// </returns>
+               /// <throws>  UnsupportedOperationException unless overridden in subclass </throws>
+               public virtual bool IsOptimized()
+               {
+                       throw new System.NotSupportedException("This reader does not support this method.");
+               }
+               
+               /// <summary> Return an array of term frequency vectors for the specified document.
+               /// The array contains a vector for each vectorized field in the document.
+               /// Each vector contains terms and frequencies for all terms in a given vectorized field.
+               /// If no such fields exist, the method returns null. The term vectors that are
+               /// returned may either be of type {@link TermFreqVector}
+               /// or of type {@link TermPositionVector} if
+               /// positions or offsets have been stored.
+               /// 
+               /// </summary>
+               /// <param name="docNumber">document for which term frequency vectors are returned
+               /// </param>
+               /// <returns> array of term frequency vectors. May be null if no term vectors have been
+               /// stored for the specified document.
+               /// </returns>
+               /// <throws>  IOException if index cannot be accessed </throws>
+               /// <seealso cref="Mono.Lucene.Net.Documents.Field.TermVector">
+               /// </seealso>
+               abstract public TermFreqVector[] GetTermFreqVectors(int docNumber);
+               
+               
+               /// <summary> Return a term frequency vector for the specified document and field. The
+               /// returned vector contains terms and frequencies for the terms in
+               /// the specified field of this document, if the field had the storeTermVector
+               /// flag set. If termvectors had been stored with positions or offsets, a 
+               /// {@link TermPositionVector} is returned.
+               /// 
+               /// </summary>
+               /// <param name="docNumber">document for which the term frequency vector is returned
+               /// </param>
+               /// <param name="field">field for which the term frequency vector is returned.
+               /// </param>
+               /// <returns> term frequency vector.  May be null if the field does not exist in the specified
+               /// document or the term vector was not stored.
+               /// </returns>
+               /// <throws>  IOException if index cannot be accessed </throws>
+               /// <seealso cref="Mono.Lucene.Net.Documents.Field.TermVector">
+               /// </seealso>
+               abstract public TermFreqVector GetTermFreqVector(int docNumber, System.String field);
+               
+               /// <summary> Load the Term Vector into a user-defined data structure instead of relying on the parallel arrays of
+               /// the {@link TermFreqVector}.
+               /// </summary>
+               /// <param name="docNumber">The number of the document to load the vector for
+               /// </param>
+               /// <param name="field">The name of the field to load
+               /// </param>
+               /// <param name="mapper">The {@link TermVectorMapper} to process the vector.  Must not be null
+               /// </param>
+               /// <throws>  IOException if term vectors cannot be accessed or if they do not exist for the specified field and document. </throws>
+               abstract public void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper);
+               
+               /// <summary> Map all the term vectors for all fields in a Document</summary>
+               /// <param name="docNumber">The number of the document to load the vector for
+               /// </param>
+               /// <param name="mapper">The {@link TermVectorMapper} to process the vector.  Must not be null
+               /// </param>
+               /// <throws>  IOException if term vectors cannot be accessed or if they do not exist for the specified field and document. </throws>
+               abstract public void  GetTermFreqVector(int docNumber, TermVectorMapper mapper);
+               
+               /// <summary> Returns <code>true</code> if an index exists at the specified directory.
+               /// If the directory does not exist or if there is no index in it,
+               /// <code>false</code> is returned.
+               /// </summary>
+               /// <param name="directory">the directory to check for an index
+               /// </param>
+               /// <returns> <code>true</code> if an index exists; <code>false</code> otherwise
+               /// </returns>
+               /// <deprecated> Use {@link #IndexExists(Directory)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// 
+               /// </deprecated>
+        [Obsolete("Use IndexExists(Directory) instead. This method will be removed in the 3.0 release.")]
+               public static bool IndexExists(System.String directory)
+               {
+                       return IndexExists(new System.IO.FileInfo(directory));
+               }
+               
+               /// <summary> Returns <code>true</code> if an index exists at the specified directory.
+               /// If the directory does not exist or if there is no index in it,
+               /// <code>false</code> is returned.
+               /// </summary>
+               /// <param name="directory">the directory to check for an index
+               /// </param>
+               /// <returns> <code>true</code> if an index exists; <code>false</code> otherwise
+               /// </returns>
+               /// <deprecated> Use {@link #IndexExists(Directory)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// 
+               /// </deprecated>
+        [Obsolete("Use IndexExists(Directory) instead. This method will be removed in the 3.0 release.")]
+               public static bool IndexExists(System.IO.FileInfo directory)
+               {
+            System.String[] list = null;
+            if (System.IO.Directory.Exists(directory.FullName))
+            {
+                System.IO.DirectoryInfo di = new System.IO.DirectoryInfo(directory.FullName);
+                System.IO.FileInfo[] fi = di.GetFiles();
+                if (fi.Length > 0)
+                {
+                    list = new System.String[fi.Length];
+                    for (int i = 0; i < fi.Length; i++)
+                    {
+                        list[i] = fi[i].Name;
+                    }
+                }
+            }
+                       return SegmentInfos.GetCurrentSegmentGeneration(list) != - 1;
+               }
+               
+               /// <summary> Returns <code>true</code> if an index exists at the specified directory.
+               /// If the directory does not exist or if there is no index in it,
+               /// <code>false</code> is returned.
+               /// </summary>
+               /// <param name="directory">the directory to check for an index
+               /// </param>
+               /// <returns> <code>true</code> if an index exists; <code>false</code> otherwise
+               /// </returns>
+               /// <throws>  IOException if there is a problem with accessing the index </throws>
+               public static bool IndexExists(Directory directory)
+               {
+                       return SegmentInfos.GetCurrentSegmentGeneration(directory) != - 1;
+               }
+               
+               /// <summary>Returns the number of documents in this index. </summary>
+               public abstract int NumDocs();
+               
+               /// <summary>Returns one greater than the largest possible document number.
+               /// This may be used to, e.g., determine how big to allocate an array which
+               /// will have an element for every document number in an index.
+               /// </summary>
+               public abstract int MaxDoc();
+               
+               /// <summary>Returns the number of deleted documents. </summary>
+               public virtual int NumDeletedDocs()
+               {
+                       return MaxDoc() - NumDocs();
+               }
+               
+               /// <summary> Returns the stored fields of the <code>n</code><sup>th</sup>
+               /// <code>Document</code> in this index.
+               /// <p/>
+               /// <b>NOTE:</b> for performance reasons, this method does not check if the
+               /// requested document is deleted, and therefore asking for a deleted document
+               /// may yield unspecified results. Usually this is not required, however you
+               /// can call {@link #IsDeleted(int)} with the requested document ID to verify
+               /// the document is not deleted.
+               /// 
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
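+               /// <example>
+               /// A common sketch: iterate every document, skipping deleted ones
+               /// (<code>reader</code> is assumed to be an open IndexReader):
+               /// <code>
+               /// for (int i = 0; i &lt; reader.MaxDoc(); i++)
+               /// {
+               ///     if (reader.IsDeleted(i))
+               ///         continue;
+               ///     Document doc = reader.Document(i);
+               /// }
+               /// </code>
+               /// </example>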
+               public virtual Document Document(int n)
+               {
+                       EnsureOpen();
+                       return Document(n, null);
+               }
+               
+               /// <summary> Get the {@link Mono.Lucene.Net.Documents.Document} at the <code>n</code>
+               /// <sup>th</sup> position. The {@link FieldSelector} may be used to determine
+               /// what {@link Mono.Lucene.Net.Documents.Field}s to load and how they should
+               /// be loaded. <b>NOTE:</b> If this Reader (more specifically, the underlying
+               /// <code>FieldsReader</code>) is closed before the lazy
+               /// {@link Mono.Lucene.Net.Documents.Field} is loaded an exception may be
+               /// thrown. If you want the value of a lazy
+               /// {@link Mono.Lucene.Net.Documents.Field} to be available after closing you
+               /// must explicitly load it or fetch the Document again with a new loader.
+               /// <p/>
+               /// <b>NOTE:</b> for performance reasons, this method does not check if the
+               /// requested document is deleted, and therefore asking for a deleted document
+               /// may yield unspecified results. Usually this is not required, however you
+               /// can call {@link #IsDeleted(int)} with the requested document ID to verify
+               /// the document is not deleted.
+               /// 
+               /// </summary>
+               /// <param name="n">Get the document at the <code>n</code><sup>th</sup> position
+               /// </param>
+               /// <param name="fieldSelector">The {@link FieldSelector} to use to determine what
+               /// Fields should be loaded on the Document. May be null, in which case
+               /// all Fields will be loaded.
+               /// </param>
+               /// <returns> The stored fields of the
+               /// {@link Mono.Lucene.Net.Documents.Document} at the nth position
+               /// </returns>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <seealso cref="Mono.Lucene.Net.Documents.Fieldable">
+               /// </seealso>
+               /// <seealso cref="Mono.Lucene.Net.Documents.FieldSelector">
+               /// </seealso>
+               /// <seealso cref="Mono.Lucene.Net.Documents.SetBasedFieldSelector">
+               /// </seealso>
+               /// <seealso cref="Mono.Lucene.Net.Documents.LoadFirstFieldSelector">
+               /// </seealso>
+               // TODO (1.5): When we convert to JDK 1.5 make this Set<String>
+               public abstract Document Document(int n, FieldSelector fieldSelector);
+               
+               /// <summary>Returns true if document <i>n</i> has been deleted </summary>
+               public abstract bool IsDeleted(int n);
+               
+               /// <summary>Returns true if any documents have been deleted </summary>
+               public abstract bool HasDeletions();
+               
+               /// <summary>Returns true if there are norms stored for this field. </summary>
+               public virtual bool HasNorms(System.String field)
+               {
+                       // backward compatible implementation.
+                       // SegmentReader has an efficient implementation.
+                       EnsureOpen();
+                       return Norms(field) != null;
+               }
+               
+               /// <summary>Returns the byte-encoded normalization factor for the named field of
+               /// every document.  This is used by the search code to score documents.
+               /// 
+               /// </summary>
+               /// <seealso cref="Mono.Lucene.Net.Documents.Field.SetBoost(float)">
+               /// </seealso>
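+               /// <example>
+               /// Sketch: decode the norm of document 0 for a hypothetical field "contents"
+               /// (<code>reader</code> is assumed to be an open IndexReader):
+               /// <code>
+               /// byte[] norms = reader.Norms("contents");
+               /// float norm0 = Similarity.DecodeNorm(norms[0]);
+               /// </code>
+               /// </example>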
+               public abstract byte[] Norms(System.String field);
+               
+               /// <summary>Reads the byte-encoded normalization factor for the named field of every
+               /// document.  This is used by the search code to score documents.
+               /// 
+               /// </summary>
+               /// <seealso cref="Mono.Lucene.Net.Documents.Field.SetBoost(float)">
+               /// </seealso>
+               public abstract void  Norms(System.String field, byte[] bytes, int offset);
+               
+               /// <summary>Expert: Resets the normalization factor for the named field of the named
+               /// document.  The norm represents the product of the field's {@link
+               /// Mono.Lucene.Net.Documents.Fieldable#SetBoost(float) boost} and its {@link Similarity#LengthNorm(String,
+               /// int) length normalization}.  Thus, to preserve the length normalization
+               /// values when resetting this, one should base the new value upon the old.
+               /// 
+               /// <b>NOTE:</b> If this field does not store norms, then
+               /// this method call will silently do nothing.
+               /// 
+               /// </summary>
+               /// <seealso cref="Norms(String)">
+               /// </seealso>
+               /// <seealso cref="Similarity.DecodeNorm(byte)">
+               /// </seealso>
+               /// <throws>  StaleReaderException if the index has changed since this reader was opened </throws>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer has this index open (<code>write.lock</code> could not be obtained) </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  SetNorm(int doc, System.String field, byte value_Renamed)
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               AcquireWriteLock();
+                               hasChanges = true;
+                               DoSetNorm(doc, field, value_Renamed);
+                       }
+               }
+               
+               /// <summary>Implements setNorm in subclass.</summary>
+               protected internal abstract void  DoSetNorm(int doc, System.String field, byte value_Renamed);
+               
+               /// <summary>Expert: Resets the normalization factor for the named field of the named
+               /// document.
+               /// 
+               /// </summary>
+               /// <seealso cref="Norms(String)">
+               /// </seealso>
+               /// <seealso cref="Similarity.DecodeNorm(byte)">
+               /// 
+               /// </seealso>
+               /// <throws>  StaleReaderException if the index has changed since this reader was opened </throws>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer has this index open (<code>write.lock</code> could not be obtained) </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  SetNorm(int doc, System.String field, float value_Renamed)
+               {
+                       EnsureOpen();
+                       SetNorm(doc, field, Similarity.EncodeNorm(value_Renamed));
+               }
+               
+               /// <summary>Returns an enumeration of all the terms in the index. The
+               /// enumeration is ordered by Term.compareTo(). Each term is greater
+               /// than all that precede it in the enumeration. Note that after
+               /// calling terms(), {@link TermEnum#Next()} must be called
+               /// on the resulting enumeration before calling other methods such as
+               /// {@link TermEnum#Term()}.
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
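+               /// <example>
+               /// Sketch of the required call order (Next() before Term()); <code>reader</code>
+               /// is assumed to be an open IndexReader:
+               /// <code>
+               /// TermEnum terms = reader.Terms();
+               /// while (terms.Next())
+               /// {
+               ///     Term t = terms.Term();
+               ///     int docFreq = reader.DocFreq(t);
+               /// }
+               /// terms.Close();
+               /// </code>
+               /// </example>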
+               public abstract TermEnum Terms();
+               
+               /// <summary>Returns an enumeration of all terms starting at a given term. If
+               /// the given term does not exist, the enumeration is positioned at the
+               /// first term greater than the supplied term. The enumeration is
+               /// ordered by Term.compareTo(). Each term is greater than all that
+               /// precede it in the enumeration.
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public abstract TermEnum Terms(Term t);
+               
+               /// <summary>Returns the number of documents containing the term <code>t</code>.</summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public abstract int DocFreq(Term t);
+               
+               /// <summary>Returns an enumeration of all the documents which contain
+               /// <code>term</code>. For each document, the document number and the frequency of
+               /// the term in that document are provided, for use in
+               /// search scoring.  If term is null, then all non-deleted
+               /// docs are returned with freq=1.
+               /// Thus, this method implements the mapping:
+               /// <p/><ul>
+               /// Term &#160;&#160; =&gt; &#160;&#160; &lt;docNum, freq&gt;<sup>*</sup>
+               /// </ul>
+               /// <p/>The enumeration is ordered by document number.  Each document number
+               /// is greater than all that precede it in the enumeration.
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
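+               /// <example>
+               /// Sketch (field name and term text are illustrative; <code>reader</code>
+               /// is an open IndexReader):
+               /// <code>
+               /// TermDocs docs = reader.TermDocs(new Term("contents", "lucene"));
+               /// try
+               /// {
+               ///     while (docs.Next())
+               ///     {
+               ///         int docNum = docs.Doc();
+               ///         int freq = docs.Freq();
+               ///     }
+               /// }
+               /// finally
+               /// {
+               ///     docs.Close();
+               /// }
+               /// </code>
+               /// </example>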
+               public virtual TermDocs TermDocs(Term term)
+               {
+                       EnsureOpen();
+                       TermDocs termDocs = TermDocs();
+                       termDocs.Seek(term);
+                       return termDocs;
+               }
+               
+               /// <summary>Returns an unpositioned {@link TermDocs} enumerator.</summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public abstract TermDocs TermDocs();
+               
+               /// <summary>Returns an enumeration of all the documents which contain
+               /// <code>term</code>.  For each document, in addition to the document number
+               /// and frequency of the term in that document, a list of all of the ordinal
+               /// positions of the term in the document is available.  Thus, this method
+               /// implements the mapping:
+               /// 
+               /// <p/><ul>
+               /// Term &#160;&#160; =&gt; &#160;&#160; &lt;docNum, freq,
+               /// &lt;pos<sub>1</sub>, pos<sub>2</sub>, ...
+               /// pos<sub>freq-1</sub>&gt;
+               /// &gt;<sup>*</sup>
+               /// </ul>
+               /// <p/> This positional information facilitates phrase and proximity searching.
+               /// <p/>The enumeration is ordered by document number.  Each document number is
+               /// greater than all that precede it in the enumeration.
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
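+               /// <example>
+               /// Sketch: walk all positions of an illustrative term:
+               /// <code>
+               /// TermPositions positions = reader.TermPositions(new Term("contents", "lucene"));
+               /// while (positions.Next())
+               /// {
+               ///     for (int i = 0; i &lt; positions.Freq(); i++)
+               ///     {
+               ///         int pos = positions.NextPosition();
+               ///     }
+               /// }
+               /// positions.Close();
+               /// </code>
+               /// </example>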
+               public virtual TermPositions TermPositions(Term term)
+               {
+                       EnsureOpen();
+                       TermPositions termPositions = TermPositions();
+                       termPositions.Seek(term);
+                       return termPositions;
+               }
+               
+               /// <summary>Returns an unpositioned {@link TermPositions} enumerator.</summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public abstract TermPositions TermPositions();
+               
+               
+               
+               /// <summary>Deletes the document numbered <code>docNum</code>.  Once a document is
+               /// deleted it will not appear in TermDocs or TermPositions enumerations.
+               /// Attempts to read its field with the {@link #document}
+               /// method will result in an error.  The presence of this document may still be
+               /// reflected in the {@link #docFreq} statistic, though
+               /// this will be corrected eventually as the index is further modified.
+               /// 
+               /// </summary>
+               /// <throws>  StaleReaderException if the index has changed since this reader was opened </throws>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer has this index open (<code>write.lock</code> could not be obtained) </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  DeleteDocument(int docNum)
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               AcquireWriteLock();
+                               hasChanges = true;
+                               DoDelete(docNum);
+                       }
+               }
+               
+               
+               /// <summary>Implements deletion of the document numbered <code>docNum</code>.
+               /// Applications should call {@link #DeleteDocument(int)} or {@link #DeleteDocuments(Term)}.
+               /// </summary>
+               protected internal abstract void  DoDelete(int docNum);
+               
+               
+               /// <summary>Deletes all documents that have a given <code>term</code> indexed.
+               /// This is useful if one uses a document field to hold a unique ID string for
+               /// the document.  Then to delete such a document, one merely constructs a
+               /// term with the appropriate field and the unique ID string as its text and
+               /// passes it to this method.
+               /// See {@link #DeleteDocument(int)} for information about when this deletion will 
+               /// become effective.
+               /// 
+               /// </summary>
+               /// <returns> the number of documents deleted
+               /// </returns>
+               /// <throws>  StaleReaderException if the index has changed since this reader was opened </throws>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer has this index open (<code>write.lock</code> could not be obtained) </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
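+               /// <example>
+               /// Sketch of the unique-ID pattern described above (field name and value
+               /// are illustrative):
+               /// <code>
+               /// int deleted = reader.DeleteDocuments(new Term("id", "doc-42"));
+               /// </code>
+               /// </example>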
+               public virtual int DeleteDocuments(Term term)
+               {
+                       EnsureOpen();
+                       TermDocs docs = TermDocs(term);
+                       if (docs == null)
+                               return 0;
+                       int n = 0;
+                       try
+                       {
+                               while (docs.Next())
+                               {
+                                       DeleteDocument(docs.Doc());
+                                       n++;
+                               }
+                       }
+                       finally
+                       {
+                               docs.Close();
+                       }
+                       return n;
+               }
+               
+               /// <summary>Undeletes all documents currently marked as deleted in this index.
+               /// 
+               /// </summary>
+               /// <throws>  StaleReaderException if the index has changed since this reader was opened </throws>
+               /// <throws>  LockObtainFailedException if another writer has this index open (<code>write.lock</code> could not be obtained) </throws>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  UndeleteAll()
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               AcquireWriteLock();
+                               hasChanges = true;
+                               DoUndeleteAll();
+                       }
+               }
+               
+               /// <summary>Implements actual undeleteAll() in subclass. </summary>
+               protected internal abstract void  DoUndeleteAll();
+               
+               /// <summary>Does nothing by default. Subclasses that require a write lock for
+               /// index modifications must implement this method. 
+               /// </summary>
+               protected internal virtual void  AcquireWriteLock()
+               {
+                       lock (this)
+                       {
+                               /* NOOP */
+                       }
+               }
+               
+               /// <summary> Commits any pending changes (from delete, undeleteAll, or setNorm operations) to the index. </summary>
+               /// <throws>  IOException </throws>
+               public void  Flush()
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               Commit();
+                       }
+               }
+               
+               /// <param name="commitUserData">Opaque Map (String -> String)
+               /// that's recorded into the segments file in the index,
+               /// and retrievable by {@link
+               /// IndexReader#getCommitUserData}.
+               /// </param>
+               /// <throws>  IOException </throws>
+        public void Flush(System.Collections.Generic.IDictionary<string, string> commitUserData)
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               Commit(commitUserData);
+                       }
+               }
+               
+               /// <summary> Commit changes resulting from delete, undeleteAll, or
+               /// setNorm operations
+               /// 
+               /// If an exception is hit, then either no changes or all
+               /// changes will have been committed to the index
+               /// (transactional semantics).
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public /*protected internal*/ void  Commit()
+               {
+                       lock (this)
+                       {
+                               Commit(null);
+                       }
+               }
+               
+               /// <summary> Commit changes resulting from delete, undeleteAll, or
+               /// setNorm operations
+               /// 
+               /// If an exception is hit, then either no changes or all
+               /// changes will have been committed to the index
+               /// (transactional semantics).
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
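+               /// <example>
+               /// Sketch (the key and value are illustrative):
+               /// <code>
+               /// System.Collections.Generic.IDictionary&lt;string, string&gt; userData =
+               ///     new System.Collections.Generic.Dictionary&lt;string, string&gt;();
+               /// userData["source"] = "nightly-rebuild";
+               /// reader.Commit(userData);
+               /// </code>
+               /// </example>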
+        public void Commit(System.Collections.Generic.IDictionary<string, string> commitUserData)
+               {
+                       lock (this)
+                       {
+                               if (hasChanges)
+                               {
+                                       DoCommit(commitUserData);
+                               }
+                               hasChanges = false;
+                       }
+               }
+               
+               /// <summary>Implements commit.</summary>
+               /// <deprecated> Please implement {@link #DoCommit(Map)}
+               /// instead. 
+               /// </deprecated>
+        [Obsolete("Please implement DoCommit(IDictionary<string, string>) instead")]
+               protected internal abstract void  DoCommit();
+               
+               /// <summary>Implements commit.  NOTE: subclasses should override
+               /// this.  In 3.0 this will become an abstract method. 
+               /// </summary>
+        protected internal virtual void DoCommit(System.Collections.Generic.IDictionary<string, string> commitUserData)
+               {
+                       // Default impl discards commitUserData; all Lucene
+                       // subclasses override this (do not discard it).
+                       DoCommit();
+               }
+               
+               /// <summary> Closes files associated with this index.
+               /// Also saves any new deletions to disk.
+               /// No other methods should be called after this has been called.
+               /// </summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public void  Close()
+               {
+                       lock (this)
+                       {
+                               if (!closed)
+                               {
+                                       DecRef();
+                                       closed = true;
+                               }
+                       }
+               }
+
+        /// <summary>
+        /// .NET-specific: implements <code>IDisposable</code> by delegating to {@link #Close()}.
+        /// </summary>
+        public void Dispose()
+        {
+            Close();
+        }
+               
+               /// <summary>Implements close. </summary>
+               protected internal abstract void  DoClose();
+               
+               
+               /// <summary> Get a list of unique field names that exist in this index and have the specified
+               /// field option information.
+               /// </summary>
+               /// <param name="fldOption">specifies which field option should be available for the returned fields
+               /// </param>
+               /// <returns> Collection of Strings indicating the names of the fields.
+               /// </returns>
+               /// <seealso cref="IndexReader.FieldOption">
+               /// </seealso>
+               public abstract System.Collections.Generic.ICollection<string> GetFieldNames(FieldOption fldOption);
+               
+               /// <summary> Returns <code>true</code> iff the index in the named directory is
+               /// currently locked.
+               /// </summary>
+               /// <param name="directory">the directory to check for a lock
+               /// </param>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <deprecated> Please use {@link IndexWriter#IsLocked(Directory)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// 
+               /// </deprecated>
+        [Obsolete("Please use IndexWriter.IsLocked(Directory) instead. This method will be removed in the 3.0 release.")]
+               public static bool IsLocked(Directory directory)
+               {
+                       return directory.MakeLock(IndexWriter.WRITE_LOCK_NAME).IsLocked();
+               }
+               
+               /// <summary> Returns <code>true</code> iff the index in the named directory is
+               /// currently locked.
+               /// </summary>
+               /// <param name="directory">the directory to check for a lock
+               /// </param>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <deprecated> Use {@link #IsLocked(Directory)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// 
+               /// </deprecated>
+        [Obsolete("Use IsLocked(Directory) instead. This method will be removed in the 3.0 release.")]
+               public static bool IsLocked(System.String directory)
+               {
+                       Directory dir = FSDirectory.GetDirectory(directory);
+                       try
+                       {
+                               return IsLocked(dir);
+                       }
+                       finally
+                       {
+                               dir.Close();
+                       }
+               }
+               
+               /// <summary> Forcibly unlocks the index in the named directory.
+               /// <p/>
+               /// Caution: this should only be used by failure recovery code,
+               /// when it is known that no other process nor thread is in fact
+               /// currently accessing this index.
+               /// </summary>
+               /// <deprecated> Please use {@link IndexWriter#Unlock(Directory)} instead.
+               /// This method will be removed in the 3.0 release.
+               /// 
+               /// </deprecated>
+        [Obsolete("Please use IndexWriter.Unlock(Directory) instead. This method will be removed in the 3.0 release.")]
+               public static void  Unlock(Directory directory)
+               {
+                       directory.MakeLock(IndexWriter.WRITE_LOCK_NAME).Release();
+               }
+               
+               /// <summary> Expert: return the IndexCommit that this reader has
+               /// opened.  This method is only implemented by those
+               /// readers that correspond to a Directory with its own
+               /// segments_N file.
+               /// 
+               /// <p/><b>WARNING</b>: this API is new and experimental and
+               /// may suddenly change.<p/>
+               /// </summary>
+               public virtual IndexCommit GetIndexCommit()
+               {
+                       throw new System.NotSupportedException("This reader does not support this method.");
+               }
+               
+               /// <summary> Prints the filename and size of each file within a given compound file.
+               /// Add the -extract flag to extract files to the current working directory.
+               /// In order to make the extracted version of the index work, you have to copy
+               /// the segments file from the compound index into the directory where the extracted files are stored.
+               /// </summary>
+               /// <param name="args">Usage: Mono.Lucene.Net.Index.IndexReader [-extract] &lt;cfsfile&gt;
+               /// </param>
+               [STAThread]
+               public static void  Main(System.String[] args)
+               {
+                       System.String filename = null;
+                       bool extract = false;
+                       
+                       for (int i = 0; i < args.Length; ++i)
+                       {
+                               if (args[i].Equals("-extract"))
+                               {
+                                       extract = true;
+                               }
+                               else if (filename == null)
+                               {
+                                       filename = args[i];
+                               }
+                       }
+                       
+                       if (filename == null)
+                       {
+                               System.Console.Out.WriteLine("Usage: Mono.Lucene.Net.Index.IndexReader [-extract] <cfsfile>");
+                               return ;
+                       }
+                       
+                       Directory dir = null;
+                       CompoundFileReader cfr = null;
+                       
+                       try
+                       {
+                               System.IO.FileInfo file = new System.IO.FileInfo(filename);
+                               System.String dirname = new System.IO.FileInfo(file.FullName).DirectoryName;
+                               filename = file.Name;
+                               dir = FSDirectory.Open(new System.IO.FileInfo(dirname));
+                               cfr = new CompoundFileReader(dir, filename);
+                               
+                               System.String[] files = cfr.List();
+                               System.Array.Sort(files); // sort the array of filenames so that the output is more readable
+                               
+                               for (int i = 0; i < files.Length; ++i)
+                               {
+                                       long len = cfr.FileLength(files[i]);
+                                       
+                                       if (extract)
+                                       {
+                                               System.Console.Out.WriteLine("extract " + files[i] + " with " + len + " bytes to local directory...");
+                                               IndexInput ii = cfr.OpenInput(files[i]);
+                                               
+                                               System.IO.FileStream f = new System.IO.FileStream(files[i], System.IO.FileMode.Create);
+                                               
+                                               // read and write with a small buffer, which is more efficient than reading byte by byte
+                                               byte[] buffer = new byte[1024];
+                                               int chunk = buffer.Length;
+                                               while (len > 0)
+                                               {
+                                                       int bufLen = (int) System.Math.Min(chunk, len);
+                                                       ii.ReadBytes(buffer, 0, bufLen);
+                                                       f.Write(buffer, 0, bufLen);
+                                                       len -= bufLen;
+                                               }
+                                               
+                                               f.Close();
+                                               ii.Close();
+                                       }
+                                       else
+                                               System.Console.Out.WriteLine(files[i] + ": " + len + " bytes");
+                               }
+                       }
+                       catch (System.IO.IOException ioe)
+                       {
+                               System.Console.Error.WriteLine(ioe.StackTrace);
+                       }
+                       finally
+                       {
+                               try
+                               {
+                                       if (dir != null)
+                                               dir.Close();
+                                       if (cfr != null)
+                                               cfr.Close();
+                               }
+                               catch (System.IO.IOException ioe)
+                               {
+                                       System.Console.Error.WriteLine(ioe.StackTrace);
+                               }
+                       }
+               }
+               
+               /// <summary>Returns all commit points that exist in the Directory.
+               /// Normally, because the default is {@link
+               /// KeepOnlyLastCommitDeletionPolicy}, there would be only
+               /// one commit point.  But if you're using a custom {@link
+               /// IndexDeletionPolicy} then there could be many commits.
+               /// Once you have a given commit, you can open a reader on
+               /// it by calling {@link IndexReader#Open(IndexCommit)}.
+               /// There must be at least one commit in
+               /// the Directory, else this method throws
+               /// {@link System.IO.IOException}.  Note that if a commit is in
+               /// progress while this method is running, that commit
+               /// may or may not appear in the returned array.  
+               /// </summary>
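+               /// <example>
+               /// Sketch: open a reader on each commit point in <code>dir</code>:
+               /// <code>
+               /// foreach (IndexCommit commit in IndexReader.ListCommits(dir))
+               /// {
+               ///     IndexReader r = IndexReader.Open(commit);
+               /// }
+               /// </code>
+               /// </example>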
+               public static System.Collections.ICollection ListCommits(Directory dir)
+               {
+                       return DirectoryReader.ListCommits(dir);
+               }
+               
+               /// <summary>Expert: returns the sequential sub readers that this
+               /// reader is logically composed of.  For example,
+               /// IndexSearcher uses this API to drive searching by one
+               /// sub reader at a time.  If this reader is not composed
+               /// of sequential child readers, it should return null.
+               /// If this method returns an empty array, that means this
+               /// reader is a null reader (for example a MultiReader
+               /// that has no sub readers).
+               /// <p/>
+               /// NOTE: You should not try using sub-readers returned by
+               /// this method to make any changes (setNorm, deleteDocument,
+               /// etc.). While this might succeed for one composite reader
+               /// (like MultiReader), it will most likely lead to index
+               /// corruption for other readers (like DirectoryReader obtained
+               /// through {@link #open}). Use the parent reader directly. 
+               /// </summary>
+               public virtual IndexReader[] GetSequentialSubReaders()
+               {
+                       return null;
+               }
+               
+               /// <summary>Expert.</summary>
+               /// <deprecated> 
+               /// </deprecated>
+        [Obsolete]
+               public virtual System.Object GetFieldCacheKey()
+               {
+                       return this;
+               }
+
+        /// <summary>Expert.  Warning: this returns null if the reader has
+        /// no deletions.
+        /// </summary>
+        public virtual object GetDeletesCacheKey()
+        {
+            return this;
+        }
+               
+               /// <summary>Returns the number of unique terms (across all fields)
+               /// in this reader.
+               /// 
+               /// This method returns long, even though internally
+               /// Lucene cannot handle more than 2^31 unique terms, for
+               /// a possible future when this limitation is removed.
+               /// 
+               /// </summary>
+               /// <throws>  UnsupportedOperationException if this count cannot be easily determined
+               /// (eg Multi*Readers). Instead, you should call {@link #getSequentialSubReaders}
+               /// and ask each sub reader for its unique term count. </throws>
+               public virtual long GetUniqueTermCount()
+               {
+                       throw new System.NotSupportedException("this reader does not implement getUniqueTermCount()");
+               }
+               
+               /// <summary>Expert: Return the state of the flag that disables fake norms in favor of representing the absence of field norms with null.</summary>
+               /// <returns> true if fake norms are disabled
+               /// </returns>
+               /// <deprecated> This currently defaults to false (to remain
+               /// back-compatible), but in 3.0 it will be hardwired to
+               /// true, meaning the norms() methods will return null for
+               /// fields that had disabled norms.
+               /// </deprecated>
+        [Obsolete("This currently defaults to false (to remain back-compatible), but in 3.0 it will be hardwired to true, meaning the norms() methods will return null for fields that had disabled norms.")]
+               public virtual bool GetDisableFakeNorms()
+               {
+                       return disableFakeNorms;
+               }
+               
+               /// <summary>Expert: Set the state of the flag that disables fake norms in favor of representing the absence of field norms with null.</summary>
+               /// <param name="disableFakeNorms">true to disable fake norms, false to preserve the legacy behavior
+               /// </param>
+               /// <deprecated> This currently defaults to false (to remain
+               /// back-compatible), but in 3.0 it will be hardwired to
+               /// true, meaning the norms() methods will return null for
+               /// fields that had disabled norms.
+               /// </deprecated>
+        [Obsolete("This currently defaults to false (to remain back-compatible), but in 3.0 it will be hardwired to true, meaning the norms() methods will return null for fields that had disabled norms.")]
+               public virtual void  SetDisableFakeNorms(bool disableFakeNorms)
+               {
+                       this.disableFakeNorms = disableFakeNorms;
+               }
+
+        public bool hasChanges_ForNUnit
+        {
+            get { return hasChanges; }
+        }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IndexWriter.cs
new file mode 100644 (file)
index 0000000..af8221a
--- /dev/null
@@ -0,0 +1,6928 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Analyzer = Mono.Lucene.Net.Analysis.Analyzer;
+using Document = Mono.Lucene.Net.Documents.Document;
+using IndexingChain = Mono.Lucene.Net.Index.DocumentsWriter.IndexingChain;
+using AlreadyClosedException = Mono.Lucene.Net.Store.AlreadyClosedException;
+using BufferedIndexInput = Mono.Lucene.Net.Store.BufferedIndexInput;
+using Directory = Mono.Lucene.Net.Store.Directory;
+using FSDirectory = Mono.Lucene.Net.Store.FSDirectory;
+using Lock = Mono.Lucene.Net.Store.Lock;
+using LockObtainFailedException = Mono.Lucene.Net.Store.LockObtainFailedException;
+using Constants = Mono.Lucene.Net.Util.Constants;
+using Query = Mono.Lucene.Net.Search.Query;
+using Similarity = Mono.Lucene.Net.Search.Similarity;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>An <code>IndexWriter</code> creates and maintains an index.
+       /// <p/>The <code>create</code> argument to the {@link
+       /// #IndexWriter(Directory, Analyzer, boolean) constructor} determines 
+       /// whether a new index is created, or whether an existing index is
+       /// opened.  Note that you can open an index with <code>create=true</code>
+       /// even while readers are using the index.  The old readers will 
+       /// continue to search the "point in time" snapshot they had opened, 
+       /// and won't see the newly created index until they re-open.  There are
+       /// also {@link #IndexWriter(Directory, Analyzer) constructors}
+       /// with no <code>create</code> argument which will create a new index
+       /// if there is not already an index at the provided path and otherwise 
+       /// open the existing index.<p/>
+       /// <p/>In either case, documents are added with {@link #AddDocument(Document)
+       /// addDocument} and removed with {@link #DeleteDocuments(Term)} or {@link
+       /// #DeleteDocuments(Query)}. A document can be updated with {@link
+       /// #UpdateDocument(Term, Document) updateDocument} (which just deletes
+       /// and then adds the entire document). When finished adding, deleting 
+       /// and updating documents, {@link #Close() close} should be called.<p/>
+       /// <a name="flush"></a>
+       /// <p/>These changes are buffered in memory and periodically
+       /// flushed to the {@link Directory} (during the above method
+       /// calls).  A flush is triggered when there are enough
+       /// buffered deletes (see {@link #setMaxBufferedDeleteTerms})
+       /// or enough added documents since the last flush, whichever
+       /// is sooner.  For the added documents, flushing is triggered
+       /// either by RAM usage of the documents (see {@link
+       /// #setRAMBufferSizeMB}) or the number of added documents.
+       /// The default is to flush when RAM usage hits 16 MB.  For
+       /// best indexing speed you should flush by RAM usage with a
+       /// large RAM buffer.  Note that flushing just moves the
+       /// internal buffered state in IndexWriter into the index, but
+       /// these changes are not visible to IndexReader until either
+       /// {@link #Commit()} or {@link #close} is called.  A flush may
+       /// also trigger one or more segment merges which by default
+       /// run with a background thread so as not to block the
+       /// addDocument calls (see <a href="#mergePolicy">below</a>
+       /// for changing the {@link MergeScheduler}).<p/>
+       /// <a name="autoCommit"></a>
+       /// <p/>The optional <code>autoCommit</code> argument to the {@link
+       /// #IndexWriter(Directory, boolean, Analyzer) constructors}
+       /// controls visibility of the changes to {@link IndexReader}
+       /// instances reading the same index.  When this is
+       /// <code>false</code>, changes are not visible until {@link
+       /// #Close()} or {@link #Commit()} is called.  Note that changes will still be
+       /// flushed to the {@link Directory} as new files, but are 
+       /// not committed (no new <code>segments_N</code> file is written 
+       /// referencing the new files, nor are the files sync'd to stable storage)
+       /// until {@link #Close()} or {@link #Commit()} is called.  If something
+       /// goes terribly wrong (for example the JVM crashes), then
+       /// the index will reflect none of the changes made since the
+       /// last commit, or the starting state if commit was not called.
+       /// You can also call {@link #Rollback()}, which closes the writer
+       /// without committing any changes, and removes any index
+       /// files that had been flushed but are now unreferenced.
+       /// This mode is useful for preventing readers from refreshing
+       /// at a bad time (for example after you've done all your
+       /// deletes but before you've done your adds).  It can also be
+       /// used to implement simple single-writer transactional
+       /// semantics ("all or none").  You can do a two-phase commit
+       /// by calling {@link #PrepareCommit()}
+       /// followed by {@link #Commit()}. This is necessary when
+       /// Lucene is working with an external resource (for example,
+       /// a database) and both must either commit or rollback the
+       /// transaction.<p/>
+       /// <p/>When <code>autoCommit</code> is <code>true</code> then
+       /// the writer will periodically commit on its own.  [<b>Deprecated</b>: Note that in 3.0, IndexWriter will
+       /// no longer accept autoCommit=true (it will be hardwired to
+       /// false).  You can always call {@link #Commit()} yourself
+       /// when needed]. There is
+       /// no guarantee when exactly an auto commit will occur (it
+       /// used to be after every flush, but it is now after every
+       /// completed merge, as of 2.4).  If you want to force a
+       /// commit, call {@link #Commit()}, or, close the writer.  Once
+       /// a commit has finished, newly opened {@link IndexReader} instances will
+       /// see the changes to the index as of that commit.  When
+       /// running in this mode, be careful not to refresh your
+       /// readers while optimize or segment merges are taking place
+       /// as this can tie up substantial disk space.<p/>
+       /// <p/>Regardless of <code>autoCommit</code>, an {@link
+       /// IndexReader} or {@link Mono.Lucene.Net.Search.IndexSearcher} will only see the
+       /// index as of the "point in time" that it was opened.  Any
+       /// changes committed to the index after the reader was opened
+       /// are not visible until the reader is re-opened.<p/>
+       /// <p/>If an index will not have more documents added for a while and optimal search
+       /// performance is desired, then either the full {@link #Optimize() optimize}
+       /// method or partial {@link #Optimize(int)} method should be
+       /// called before the index is closed.<p/>
+       /// <p/>Opening an <code>IndexWriter</code> creates a lock file for the directory in use. Trying to open
+       /// another <code>IndexWriter</code> on the same directory will lead to a
+       /// {@link LockObtainFailedException}. The {@link LockObtainFailedException}
+       /// is also thrown if an IndexReader on the same directory is used to delete documents
+       /// from the index.<p/>
+       /// <a name="deletionPolicy"></a>
+       /// <p/>Expert: <code>IndexWriter</code> allows an optional
+       /// {@link IndexDeletionPolicy} implementation to be
+       /// specified.  You can use this to control when prior commits
+       /// are deleted from the index.  The default policy is {@link
+       /// KeepOnlyLastCommitDeletionPolicy} which removes all prior
+       /// commits as soon as a new commit is done (this matches
+       /// behavior before 2.2).  Creating your own policy can allow
+       /// you to explicitly keep previous "point in time" commits
+       /// alive in the index for some time, to allow readers to
+       /// refresh to the new commit without having the old commit
+       /// deleted out from under them.  This is necessary on
+       /// filesystems like NFS that do not support "delete on last
+       /// close" semantics, which Lucene's "point in time" search
+       /// normally relies on. <p/>
+       /// <a name="mergePolicy"></a> <p/>Expert:
+       /// <code>IndexWriter</code> allows you to separately change
+       /// the {@link MergePolicy} and the {@link MergeScheduler}.
+       /// The {@link MergePolicy} is invoked whenever there are
+       /// changes to the segments in the index.  Its role is to
+       /// select which merges to do, if any, and return a {@link
+       /// MergePolicy.MergeSpecification} describing the merges.  It
+       /// also selects merges to do for optimize().  (The default is
+       /// {@link LogByteSizeMergePolicy}.)  Then, the {@link
+       /// MergeScheduler} is invoked with the requested merges and
+       /// it decides when and how to run the merges.  The default is
+       /// {@link ConcurrentMergeScheduler}. <p/>
+       /// <a name="OOME"></a><p/><b>NOTE</b>: if you hit an
+       /// OutOfMemoryError then IndexWriter will quietly record this
+       /// fact and block all future segment commits.  This is a
+       /// defensive measure in case any internal state (buffered
+       /// documents and deletions) were corrupted.  Any subsequent
+       /// calls to {@link #Commit()} will throw an
+       /// IllegalStateException.  The only course of action is to
+       /// call {@link #Close()}, which internally will call {@link
+       /// #Rollback()}, to undo any changes to the index since the
+       /// last commit.  If you opened the writer with autoCommit
+       /// false you can also just call {@link #Rollback()}
+       /// directly.<p/>
+       /// <a name="thread-safety"></a><p/><b>NOTE</b>: {@link
+       /// <code>IndexWriter</code>} instances are completely thread
+       /// safe, meaning multiple threads can call any of its
+       /// methods, concurrently.  If your application requires
+       /// external synchronization, you should <b>not</b>
+       /// synchronize on the <code>IndexWriter</code> instance as
+       /// this may cause deadlock; use your own (non-Lucene) objects
+       /// instead. <p/>
+       /// </summary>
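+       /// <example>
+       /// A minimal usage sketch based on the constructors described above
+       /// (<code>dir</code> and <code>analyzer</code> are assumed to exist; the field is illustrative):
+       /// <code>
+       /// IndexWriter writer = new IndexWriter(dir, analyzer, true);
+       /// Document doc = new Document();
+       /// doc.Add(new Field("contents", "some text", Field.Store.YES, Field.Index.ANALYZED));
+       /// writer.AddDocument(doc);
+       /// writer.Close();
+       /// </code>
+       /// </example>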
+       
+       /*
+       * Clarification: Check Points (and commits)
+       * Being able to set autoCommit=false allows IndexWriter to flush and 
+       * write new index files to the directory without writing a new segments_N
+       * file which references these new files. It also means that the state of 
+       * the in memory SegmentInfos object is different than the most recent
+       * segments_N file written to the directory.
+       * 
+       * Each time the SegmentInfos is changed, and matches the (possibly 
+       * modified) directory files, we have a new "check point". 
+       * If the modified/new SegmentInfos is written to disk - as a new 
+       * (generation of) segments_N file - this check point is also an 
+       * IndexCommit.
+       * 
+       * With autoCommit=true, every checkPoint is also a CommitPoint.
+       * With autoCommit=false, some checkPoints may not be commits.
+       * 
+       * A new checkpoint always replaces the previous checkpoint and 
+       * becomes the new "front" of the index. This allows the IndexFileDeleter 
+       * to delete files that are referenced only by stale checkpoints.
+       * (files that were created since the last commit, but are no longer
+       * referenced by the "front" of the index). For this, IndexFileDeleter 
+       * keeps track of the last non commit checkpoint.
+       */
+       public class IndexWriter : System.IDisposable
+       {
+               private void  InitBlock()
+               {
+                       similarity = Similarity.GetDefault();
+                       mergePolicy = new LogByteSizeMergePolicy(this);
+                       readerPool = new ReaderPool(this);
+               }
+               
+               /// <summary> Default value for the write lock timeout (1,000 milliseconds).</summary>
+               /// <seealso cref="setDefaultWriteLockTimeout">
+               /// </seealso>
+               public static long WRITE_LOCK_TIMEOUT = 1000;
+               
+               private long writeLockTimeout = WRITE_LOCK_TIMEOUT;
+               
+               /// <summary> Name of the write lock in the index.</summary>
+               public const System.String WRITE_LOCK_NAME = "write.lock";
+               
+               /// <deprecated>
+               /// </deprecated>
+               /// <seealso cref="LogMergePolicy.DEFAULT_MERGE_FACTOR">
+               /// </seealso>
+        [Obsolete("See LogMergePolicy.DEFAULT_MERGE_FACTOR")]
+               public static readonly int DEFAULT_MERGE_FACTOR;
+               
+               /// <summary> Value to denote a flush trigger is disabled</summary>
+               public const int DISABLE_AUTO_FLUSH = - 1;
+               
+               /// <summary> Disabled by default (because IndexWriter flushes by RAM usage
+               /// by default). Change using {@link #SetMaxBufferedDocs(int)}.
+               /// </summary>
+               public static readonly int DEFAULT_MAX_BUFFERED_DOCS = DISABLE_AUTO_FLUSH;
+               
+               /// <summary> Default value is 16 MB (which means flush when buffered
+               /// docs consume 16 MB RAM).  Change using {@link #setRAMBufferSizeMB}.
+               /// </summary>
+               public const double DEFAULT_RAM_BUFFER_SIZE_MB = 16.0;
+               
+               /// <summary> Disabled by default (because IndexWriter flushes by RAM usage
+               /// by default). Change using {@link #SetMaxBufferedDeleteTerms(int)}.
+               /// </summary>
+               public static readonly int DEFAULT_MAX_BUFFERED_DELETE_TERMS = DISABLE_AUTO_FLUSH;
+               
+               /// <deprecated>
+               /// </deprecated>
+               /// <seealso cref="LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS">
+               /// </seealso>
+        [Obsolete("See LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS")]
+               public static readonly int DEFAULT_MAX_MERGE_DOCS;
+               
+               /// <summary> Default value is 10,000. Change using {@link #SetMaxFieldLength(int)}.</summary>
+               public const int DEFAULT_MAX_FIELD_LENGTH = 10000;
+               
+               /// <summary> Default value is 128. Change using {@link #SetTermIndexInterval(int)}.</summary>
+               public const int DEFAULT_TERM_INDEX_INTERVAL = 128;
+               
+               /// <summary> Absolute hard maximum length for a term.  If a term
+               /// arrives from the analyzer longer than this length, it
+               /// is skipped and a message is printed to infoStream, if
+               /// set (see {@link #setInfoStream}).
+               /// </summary>
+               public static readonly int MAX_TERM_LENGTH;
+               
+               /// <summary> Default for {@link #getMaxSyncPauseSeconds}.  On
+               /// Windows this defaults to 10.0 seconds; elsewhere it's
+               /// 0.
+               /// </summary>
+               public static double DEFAULT_MAX_SYNC_PAUSE_SECONDS;
+               
+               // The normal read buffer size defaults to 1024, but
+               // increasing this during merging seems to yield
+               // performance gains.  However we don't want to increase
+               // it too much because there are quite a few
+               // BufferedIndexInputs created during merging.  See
+               // LUCENE-888 for details.
+               private const int MERGE_READ_BUFFER_SIZE = 4096;
+               
+               // Used for printing messages
+               private static System.Object MESSAGE_ID_LOCK = new System.Object();
+               private static int MESSAGE_ID = 0;
+               private int messageID = -1;
+               private volatile bool hitOOM;
+               
+               private Directory directory; // where this index resides
+               private Analyzer analyzer; // how to analyze text
+               
+               private Similarity similarity; // how to normalize
+               
+               private volatile uint changeCount; // increments every time a change is completed
+               private long lastCommitChangeCount; // last changeCount that was committed
+               
+               private SegmentInfos rollbackSegmentInfos; // segmentInfos we will fallback to if the commit fails
+               private System.Collections.Hashtable rollbackSegments;
+               
+               internal volatile SegmentInfos pendingCommit; // set when a commit is pending (after prepareCommit() & before commit())
+               internal volatile uint pendingCommitChangeCount;
+               
+               private SegmentInfos localRollbackSegmentInfos; // segmentInfos we will fallback to if the commit fails
+               private bool localAutoCommit; // saved autoCommit during local transaction
+               private int localFlushedDocCount; // saved docWriter.getFlushedDocCount during local transaction
+               private bool autoCommit = true; // false if we should commit only on close
+               
+               private SegmentInfos segmentInfos = new SegmentInfos(); // the segments
+        private int optimizeMaxNumSegments;
+
+               private DocumentsWriter docWriter;
+               private IndexFileDeleter deleter;
+
+        private System.Collections.Hashtable segmentsToOptimize = new System.Collections.Hashtable(); // used by optimize to note those needing optimization
+               
+               private Lock writeLock;
+               
+               private int termIndexInterval = DEFAULT_TERM_INDEX_INTERVAL;
+               
+               private bool closeDir;
+               private bool closed;
+               private bool closing;
+               
+               // Holds all SegmentInfo instances currently involved in
+               // merges
+        private System.Collections.Hashtable mergingSegments = new System.Collections.Hashtable();
+               
+               private MergePolicy mergePolicy;
+               private MergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
+        private System.Collections.Generic.LinkedList<MergePolicy.OneMerge> pendingMerges = new System.Collections.Generic.LinkedList<MergePolicy.OneMerge>();
+               private System.Collections.Generic.List<MergePolicy.OneMerge> runningMerges = new System.Collections.Generic.List<MergePolicy.OneMerge>();
+               private System.Collections.IList mergeExceptions = new System.Collections.ArrayList();
+               private long mergeGen;
+               private bool stopMerges;
+               
+               private int flushCount;
+               private int flushDeletesCount;
+               private double maxSyncPauseSeconds = DEFAULT_MAX_SYNC_PAUSE_SECONDS;
+               
+               // Used to only allow one addIndexes to proceed at once
+               // TODO: use ReadWriteLock once we are on 5.0
+               private int readCount; // count of how many threads are holding read lock
+               private SupportClass.ThreadClass writeThread; // non-null if any thread holds write lock
+               internal ReaderPool readerPool;
+               private int upgradeCount;
+
+        private int readerTermsIndexDivisor = IndexReader.DEFAULT_TERMS_INDEX_DIVISOR;
+               
+               // This is a "write once" variable (like the organic dye
+               // on a DVD-R that may or may not be heated by a laser and
+               // then cooled to permanently record the event): it's
+               // false, until getReader() is called for the first time,
+               // at which point it's switched to true and never changes
+               // back to false.  Once this is true, we hold open and
+               // reuse SegmentReader instances internally for applying
+               // deletes, doing merges, and reopening near real-time
+               // readers.
+               private volatile bool poolReaders;
+               
+               /// <summary> Expert: returns a read-only reader, covering all committed as well as
+               /// uncommitted changes to the index. This provides "near real-time"
+               /// searching, in that changes made during an IndexWriter session can be
+               /// quickly made available for searching without closing the writer or
+               /// calling {@link #commit}.
+               /// 
+               /// <p/>
+               /// Note that this is functionally equivalent to calling {@link #commit} and then
+               /// using {@link IndexReader#open} to open a new reader. But the turnaround
+               /// time of this method should be faster since it avoids the potentially
+               /// costly {@link #commit}.
+               /// <p/>
+               /// 
+        /// You must close the {@link IndexReader} returned by this method once you are done using it.
+        /// 
+               /// <p/>
+               /// It's <i>near</i> real-time because there is no hard
+               /// guarantee on how quickly you can get a new reader after
+               /// making changes with IndexWriter.  You'll have to
+               /// experiment in your situation to determine if it's
+               /// fast enough.  As this is a new and experimental
+               /// feature, please report back on your findings so we can
+               /// learn, improve and iterate.<p/>
+               /// 
+               /// <p/>The resulting reader supports {@link
+               /// IndexReader#reopen}, but that call will simply forward
+               /// back to this method (though this may change in the
+               /// future).<p/>
+               /// 
+               /// <p/>The very first time this method is called, this
+               /// writer instance will make every effort to pool the
+               /// readers that it opens for doing merges, applying
+               /// deletes, etc.  This means additional resources (RAM,
+               /// file descriptors, CPU time) will be consumed.<p/>
+               /// 
+               /// <p/>For lower latency on reopening a reader, you should call
+               /// {@link #setMergedSegmentWarmer} to pre-warm a newly merged segment
+               /// before it's committed to the index. This is important for minimizing
+               /// index-to-search delay after a large merge.
+               /// 
+               /// <p/>If an addIndexes* call is running in another thread,
+               /// then this reader will only search those segments from
+               /// the foreign index that have been successfully copied
+               /// over so far.<p/>
+               /// 
+               /// <p/><b>NOTE</b>: Once the writer is closed, any
+               /// outstanding readers may continue to be used.  However,
+               /// if you attempt to reopen any of those readers, you'll
+               /// hit an {@link AlreadyClosedException}.<p/>
+               /// 
+               /// <p/><b>NOTE:</b> This API is experimental and might
+               /// change in incompatible ways in the next release.<p/>
+               /// 
+               /// </summary>
+               /// <returns> IndexReader that covers entire index plus all
+               /// changes made so far by this IndexWriter instance
+               /// 
+               /// </returns>
+               /// <throws>  IOException </throws>
+               public virtual IndexReader GetReader()
+               {
+            return GetReader(readerTermsIndexDivisor);
+               }
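+               // Illustrative near real-time loop (doc is an assumed Document instance):
+               //
+               //   writer.AddDocument(doc);
+               //   IndexReader r = writer.GetReader();   // sees the buffered, uncommitted doc
+               //   try
+               //   {
+               //       // search over r via an IndexSearcher...
+               //   }
+               //   finally
+               //   {
+               //       r.Close();                        // callers must close the returned reader
+               //   }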
+               
+               /// <summary>Expert: like {@link #getReader}, except you can
+               /// specify which termInfosIndexDivisor should be used for
+               /// any newly opened readers.
+               /// </summary>
+               /// <param name="termInfosIndexDivisor">Subsamples which indexed
+               /// terms are loaded into RAM. This has the same effect as {@link
+               /// IndexWriter#setTermIndexInterval} except that setting
+               /// must be done at indexing time while this setting can be
+               /// set per reader.  When set to N, then one in every
+               /// N*termIndexInterval terms in the index is loaded into
+               /// memory.  By setting this to a value > 1 you can reduce
+               /// memory usage, at the expense of higher latency when
+               /// loading a TermInfo.  The default value is 1.  Set this
+               /// to -1 to skip loading the terms index entirely. 
+               /// </param>
+               public virtual IndexReader GetReader(int termInfosIndexDivisor)
+               {
+            EnsureOpen();
+
+                       if (infoStream != null)
+                       {
+                               Message("flush at getReader");
+                       }
+                       
+                       // Do this up front before flushing so that the readers
+                       // obtained during this flush are pooled, the first time
+                       // this method is called:
+                       poolReaders = true;
+                       
+                       // Prevent segmentInfos from changing while opening the
+                       // reader; in theory we could do similar retry logic,
+                       // just like we do when loading segments_N
+            IndexReader r;
+                       lock (this)
+                       {
+                Flush(false, true, true);
+                r = new ReadOnlyDirectoryReader(this, segmentInfos, termInfosIndexDivisor);
+                       }
+            MaybeMerge();
+            return r;
+               }
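+               // Illustrative trade-off: a divisor of 4 loads only one in every
+               // 4 * termIndexInterval indexed terms into RAM, shrinking the reader's
+               // memory footprint at the cost of slower TermInfo lookups:
+               //
+               //   IndexReader r = writer.GetReader(4);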
+               
+               /// <summary>Holds shared SegmentReader instances. IndexWriter uses
+               /// SegmentReaders for 1) applying deletes, 2) doing
+               /// merges, 3) handing out a real-time reader.  This pool
+               /// reuses instances of the SegmentReaders in all these
+               /// places if it is in "near real-time mode" (getReader()
+               /// has been called on this instance). 
+               /// </summary>
+               
+               internal class ReaderPool
+               {
+                       public ReaderPool(IndexWriter enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(IndexWriter enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private IndexWriter enclosingInstance;
+                       public IndexWriter Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       
+                       private System.Collections.IDictionary readerMap = new System.Collections.Hashtable();
+                       
+                       /// <summary>Forcefully clear changes for the specified segments,
+                       /// and remove them from the pool.  This is called on a successful merge. 
+                       /// </summary>
+                       internal virtual void  Clear(SegmentInfos infos)
+                       {
+                               lock (this)
+                               {
+                                       if (infos == null)
+                                       {
+                        System.Collections.IEnumerator iter = new System.Collections.Hashtable(readerMap).GetEnumerator();
+                                               while (iter.MoveNext())
+                                               {
+                                                       System.Collections.DictionaryEntry ent = (System.Collections.DictionaryEntry) iter.Current;
+                                                       ((SegmentReader) ent.Value).hasChanges = false;
+                                               }
+                                       }
+                                       else
+                                       {
+                                               int numSegments = infos.Count;
+                                               for (int i = 0; i < numSegments; i++)
+                                               {
+                                                       SegmentInfo info = infos.Info(i);
+                                                       if (readerMap.Contains(info))
+                                                       {
+                                                               ((SegmentReader) readerMap[info]).hasChanges = false;
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       // used only by asserts
+                       public virtual bool InfoIsLive(SegmentInfo info)
+                       {
+                               lock (this)
+                               {
+                                       int idx = Enclosing_Instance.segmentInfos.IndexOf(info);
+                                       System.Diagnostics.Debug.Assert(idx != -1);
+                    System.Diagnostics.Debug.Assert(Enclosing_Instance.segmentInfos[idx] == info);
+                                       return true;
+                               }
+                       }
+                       
+                       public virtual SegmentInfo MapToLive(SegmentInfo info)
+                       {
+                               lock (this)
+                               {
+                                       int idx = Enclosing_Instance.segmentInfos.IndexOf(info);
+                                       if (idx != -1)
+                                       {
+                                               info = (SegmentInfo) Enclosing_Instance.segmentInfos[idx];
+                                       }
+                                       return info;
+                               }
+                       }
+                       
+                       /// <summary> Release the segment reader (i.e., decRef it, and close it if there
+                       /// are no more references).
+                       /// </summary>
+                       /// <param name="sr">
+                       /// </param>
+                       /// <throws>  IOException </throws>
+                       public virtual void  Release(SegmentReader sr)
+                       {
+                               lock (this)
+                               {
+                                       Release(sr, false);
+                               }
+                       }
+                       
+                       /// <summary> Release the segment reader (i.e., decRef it, and close it if there
+                       /// are no more references).
+                       /// </summary>
+                       /// <param name="sr">
+                       /// </param>
+                       /// <throws>  IOException </throws>
+                       public virtual void  Release(SegmentReader sr, bool drop)
+                       {
+                               lock (this)
+                               {
+                                       
+                                       bool pooled = readerMap.Contains(sr.GetSegmentInfo());
+
+                    System.Diagnostics.Debug.Assert(!pooled || readerMap[sr.GetSegmentInfo()] == sr);
+
+                    // Drop caller's ref; for an external reader (not
+                    // pooled), this decRef will close it
+                                       sr.DecRef();
+                                       
+                                       if (pooled && (drop || (!Enclosing_Instance.poolReaders && sr.GetRefCount() == 1)))
+                                       {
+
+                        // We invoke deleter.checkpoint below, so we must be
+                        // sync'd on IW if there are changes:
+                                               
+                                               // TODO: java 5
+                                               // assert !sr.hasChanges || Thread.holdsLock(IndexWriter.this);
+
+                        // Discard (don't save) changes when we are dropping
+                        // the reader; this is used only on the sub-readers
+                        // after a successful merge.
+                        sr.hasChanges &= !drop;
+
+                        bool hasChanges = sr.hasChanges;
+                                               
+                                               // Drop our ref -- this will commit any pending
+                                               // changes to the dir
+                        sr.Close();
+
+                        // We are the last ref to this reader; since we're
+                        // not pooling readers, we release it:
+                        readerMap.Remove(sr.GetSegmentInfo());
+
+                        if (hasChanges)
+                        {
+                            // Must checkpoint w/ deleter, because this
+                            // segment reader will have created new _X_N.del
+                            // file.
+                            enclosingInstance.deleter.Checkpoint(enclosingInstance.segmentInfos, false);
+                        }
+                                       }
+                               }
+                       }
+                       
+                       /// <summary>Remove all our references to readers, and commits
+                       /// any pending changes. 
+                       /// </summary>
+                       internal virtual void  Close()
+                       {
+                               lock (this)
+                               {
+                    System.Collections.IEnumerator iter = new System.Collections.Hashtable(readerMap).GetEnumerator();
+                                       while (iter.MoveNext())
+                                       {
+                                               System.Collections.DictionaryEntry ent = (System.Collections.DictionaryEntry) iter.Current;
+                                               
+                                               SegmentReader sr = (SegmentReader) ent.Value;
+                                               if (sr.hasChanges)
+                                               {
+                                                       System.Diagnostics.Debug.Assert(InfoIsLive(sr.GetSegmentInfo()));
+                                                       sr.DoCommit(null);
+                            // Must checkpoint w/ deleter, because this
+                            // segment reader will have created new _X_N.del
+                            // file.
+                            enclosingInstance.deleter.Checkpoint(enclosingInstance.segmentInfos, false);
+                                               }
+
+                        readerMap.Remove(ent.Key); 
+                                               
+                                               // NOTE: it is allowed that this decRef does not
+                                               // actually close the SR; this can happen when a
+                                               // near real-time reader is kept open after the
+                                               // IndexWriter instance is closed
+                                               sr.DecRef();
+                                       }
+                               }
+                       }
+                       
+                       /// <summary> Commit all segment readers in the pool.</summary>
+                       /// <throws>  IOException </throws>
+                       internal virtual void  Commit()
+                       {
+                               lock (this)
+                               {
+                    System.Collections.IEnumerator iter = new System.Collections.Hashtable(readerMap).GetEnumerator();
+                                       while (iter.MoveNext())
+                                       {
+                                               System.Collections.DictionaryEntry ent = (System.Collections.DictionaryEntry) iter.Current;
+                                               
+                                               SegmentReader sr = (SegmentReader) ent.Value;
+                                               if (sr.hasChanges)
+                                               {
+                                                       System.Diagnostics.Debug.Assert(InfoIsLive(sr.GetSegmentInfo()));
+                                                       sr.DoCommit(null);
+                            // Must checkpoint w/ deleter, because this
+                            // segment reader will have created new _X_N.del
+                            // file.
+                            enclosingInstance.deleter.Checkpoint(enclosingInstance.segmentInfos, false);
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       /// <summary> Returns a ref to a clone.  NOTE: this clone is not
+                       /// enrolled in the pool, so you should simply close()
+                       /// it when you're done (i.e., do not call release()).
+                       /// </summary>
+                       public virtual SegmentReader GetReadOnlyClone(SegmentInfo info, bool doOpenStores, int termInfosIndexDivisor)
+                       {
+                               lock (this)
+                               {
+                                       SegmentReader sr = Get(info, doOpenStores, BufferedIndexInput.BUFFER_SIZE, termInfosIndexDivisor);
+                                       try
+                                       {
+                                               return (SegmentReader) sr.Clone(true);
+                                       }
+                                       finally
+                                       {
+                                               sr.DecRef();
+                                       }
+                               }
+                       }
+                       
+                       /// <summary> Obtain a SegmentReader from the readerPool.  The reader
+                       /// must be returned by calling {@link #Release(SegmentReader)}
+                       /// </summary>
+                       /// <seealso cref="Release(SegmentReader)">
+                       /// </seealso>
+                       /// <param name="info">
+                       /// </param>
+                       /// <param name="doOpenStores">
+                       /// </param>
+                       /// <throws>  IOException </throws>
+                       public virtual SegmentReader Get(SegmentInfo info, bool doOpenStores)
+                       {
+                               lock (this)
+                               {
+                    return Get(info, doOpenStores, BufferedIndexInput.BUFFER_SIZE, enclosingInstance.readerTermsIndexDivisor);
+                               }
+                       }
+                       /// <summary> Obtain a SegmentReader from the readerPool.  The reader
+                       /// must be returned by calling {@link #Release(SegmentReader)}
+                       /// 
+                       /// </summary>
+                       /// <seealso cref="Release(SegmentReader)">
+                       /// </seealso>
+                       /// <param name="info">
+                       /// </param>
+                       /// <param name="doOpenStores">
+                       /// </param>
+                       /// <param name="readBufferSize">
+                       /// </param>
+                       /// <param name="termsIndexDivisor">
+                       /// </param>
+                       /// <throws>  IOException </throws>
+                       public virtual SegmentReader Get(SegmentInfo info, bool doOpenStores, int readBufferSize, int termsIndexDivisor)
+                       {
+                               lock (this)
+                               {
+                                       
+                                       if (Enclosing_Instance.poolReaders)
+                                       {
+                                               readBufferSize = BufferedIndexInput.BUFFER_SIZE;
+                                       }
+                                       
+                                       SegmentReader sr = (SegmentReader) readerMap[info];
+                                       if (sr == null)
+                                       {
+                                               // TODO: we may want to avoid doing this while
+                                               // synchronized
+                                               // Returns a ref, which we xfer to readerMap:
+                                               sr = SegmentReader.Get(info, readBufferSize, doOpenStores, termsIndexDivisor);
+                        if (info.dir == enclosingInstance.directory)
+                        {
+                            // Only pool if reader is not external
+                            readerMap[info] = sr;
+                        }
+                                       }
+                                       else
+                                       {
+                                               if (doOpenStores)
+                                               {
+                                                       sr.OpenDocStores();
+                                               }
+                                               if (termsIndexDivisor != -1 && !sr.TermsIndexLoaded())
+                                               {
+                                                       // If this reader was originally opened because we
+                                                       // needed to merge it, we didn't load the terms
+                                                       // index.  But now, if the caller wants the terms
+                                                       // index (eg because it's doing deletes, or an NRT
+                                                       // reader is being opened) we ask the reader to
+                                                       // load its terms index.
+                                                       sr.LoadTermsIndex(termsIndexDivisor);
+                                               }
+                                       }
+                                       
+                                       // Return a ref to our caller
+                    if (info.dir == enclosingInstance.directory)
+                    {
+                        // Only incRef if we pooled (reader is not external)
+                        sr.IncRef();
+                    }
+                                       return sr;
+                               }
+                       }
+                       
+                       // Returns a ref
+                       public virtual SegmentReader GetIfExists(SegmentInfo info)
+                       {
+                               lock (this)
+                               {
+                                       SegmentReader sr = (SegmentReader) readerMap[info];
+                                       if (sr != null)
+                                       {
+                                               sr.IncRef();
+                                       }
+                                       return sr;
+                               }
+                       }
+               }
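+               // Illustrative internal discipline (sketch): a reader obtained from the
+               // pool via Get() must be handed back with Release(), just as
+               // NumDeletedDocs below does for GetIfExists():
+               //
+               //   SegmentReader sr = readerPool.Get(info, false);
+               //   try { /* consult sr */ }
+               //   finally { readerPool.Release(sr); }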
+               
+               /// <summary> Obtain the number of deleted docs for a pooled reader.
+               /// If the reader isn't being pooled, the segmentInfo's 
+               /// delCount is returned.
+               /// </summary>
+               public virtual int NumDeletedDocs(SegmentInfo info)
+               {
+                       SegmentReader reader = readerPool.GetIfExists(info);
+                       try
+                       {
+                               if (reader != null)
+                               {
+                                       return reader.NumDeletedDocs();
+                               }
+                               else
+                               {
+                                       return info.GetDelCount();
+                               }
+                       }
+                       finally
+                       {
+                               if (reader != null)
+                               {
+                                       readerPool.Release(reader);
+                               }
+                       }
+               }
+               
+               internal virtual void  AcquireWrite()
+               {
+                       lock (this)
+                       {
+                               System.Diagnostics.Debug.Assert(writeThread != SupportClass.ThreadClass.Current());
+                               while (writeThread != null || readCount > 0)
+                                       DoWait();
+                               
+                               // We could have been closed while we were waiting:
+                               EnsureOpen();
+                               
+                               writeThread = SupportClass.ThreadClass.Current();
+                       }
+               }
+               
+               internal virtual void  ReleaseWrite()
+               {
+                       lock (this)
+                       {
+                               System.Diagnostics.Debug.Assert(SupportClass.ThreadClass.Current() == writeThread);
+                               writeThread = null;
+                               System.Threading.Monitor.PulseAll(this);
+                       }
+               }
+               
+               internal virtual void  AcquireRead()
+               {
+                       lock (this)
+                       {
+                               SupportClass.ThreadClass current = SupportClass.ThreadClass.Current();
+                               while (writeThread != null && writeThread != current)
+                                       DoWait();
+                               
+                               readCount++;
+                       }
+               }
+               
+               // Allows one readLock to upgrade to a writeLock even if
+               // there are other readLocks as long as all other
+               // readLocks are also blocked in this method:
+               internal virtual void  UpgradeReadToWrite()
+               {
+                       lock (this)
+                       {
+                               System.Diagnostics.Debug.Assert(readCount > 0);
+                               upgradeCount++;
+                               while (readCount > upgradeCount || writeThread != null)
+                               {
+                                       DoWait();
+                               }
+                               
+                               writeThread = SupportClass.ThreadClass.Current();
+                               readCount--;
+                               upgradeCount--;
+                       }
+               }
+               
+               internal virtual void  ReleaseRead()
+               {
+                       lock (this)
+                       {
+                               readCount--;
+                               System.Diagnostics.Debug.Assert(readCount >= 0);
+                               System.Threading.Monitor.PulseAll(this);
+                       }
+               }
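+               // Illustrative pairing (internal use): each acquire must be balanced by
+               // its matching release on the same thread, typically in a finally block:
+               //
+               //   AcquireRead();
+               //   try { /* read shared state such as segmentInfos */ }
+               //   finally { ReleaseRead(); }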
+               
+               internal bool IsOpen(bool includePendingClose)
+               {
+                       lock (this)
+                       {
+                               return !(closed || (includePendingClose && closing));
+                       }
+               }
+               
+               /// <summary> Used internally to throw an {@link
+               /// AlreadyClosedException} if this IndexWriter has been
+               /// closed.
+               /// </summary>
+               /// <throws>  AlreadyClosedException if this IndexWriter is closed </throws>
+               protected internal void  EnsureOpen(bool includePendingClose)
+               {
+                       lock (this)
+                       {
+                               if (!IsOpen(includePendingClose))
+                               {
+                                       throw new AlreadyClosedException("this IndexWriter is closed");
+                               }
+                       }
+               }
+               
+               protected internal void  EnsureOpen()
+               {
+                       lock (this)
+                       {
+                               EnsureOpen(true);
+                       }
+               }
+               
+               /// <summary> Prints a message to the infoStream (if non-null),
+               /// prefixed with the identifying information for this
+               /// writer and the thread that's calling it.
+               /// </summary>
+               public virtual void  Message(System.String message)
+               {
+                       if (infoStream != null)
+                infoStream.WriteLine("IW " + messageID + " [" + DateTime.Now.ToString() + "; " + SupportClass.ThreadClass.Current().Name + "]: " + message);
+               }
+               
+               private void  SetMessageID(System.IO.StreamWriter infoStream)
+               {
+                       lock (this)
+                       {
+                               if (infoStream != null && messageID == -1)
+                               {
+                                       lock (MESSAGE_ID_LOCK)
+                                       {
+                                               messageID = MESSAGE_ID++;
+                                       }
+                               }
+                               this.infoStream = infoStream;
+                       }
+               }
+               
+               /// <summary> Casts current mergePolicy to LogMergePolicy, and throws
+               /// an exception if the mergePolicy is not a LogMergePolicy.
+               /// </summary>
+               private LogMergePolicy GetLogMergePolicy()
+               {
+                       if (mergePolicy is LogMergePolicy)
+                               return (LogMergePolicy) mergePolicy;
+                       else
+                               throw new System.ArgumentException("this method can only be called when the merge policy is the default LogMergePolicy");
+               }
+               
+               /// <summary><p/>Get the current setting of whether newly flushed
+               /// segments will use the compound file format.  Note that
+               /// this just returns the value previously set with
+               /// setUseCompoundFile(boolean), or the default value
+               /// (true).  You cannot use this to query the status of
+               /// previously flushed segments.<p/>
+               /// 
+               /// <p/>Note that this method is a convenience method: it
+               /// just calls mergePolicy.getUseCompoundFile as long as
+               /// mergePolicy is an instance of {@link LogMergePolicy}.
+               /// Otherwise an IllegalArgumentException is thrown.<p/>
+               /// 
+               /// </summary>
+               /// <seealso cref="SetUseCompoundFile(boolean)">
+               /// </seealso>
+               public virtual bool GetUseCompoundFile()
+               {
+                       return GetLogMergePolicy().GetUseCompoundFile();
+               }
+               
+               /// <summary><p/>Setting to turn on usage of a compound file. When on,
+               /// multiple files for each segment are merged into a
+               /// single file when a new segment is flushed.<p/>
+               /// 
+               /// <p/>Note that this method is a convenience method: it
+               /// just calls mergePolicy.setUseCompoundFile as long as
+               /// mergePolicy is an instance of {@link LogMergePolicy}.
+               /// Otherwise an IllegalArgumentException is thrown.<p/>
+               /// </summary>
+               public virtual void  SetUseCompoundFile(bool value_Renamed)
+               {
+                       GetLogMergePolicy().SetUseCompoundFile(value_Renamed);
+                       GetLogMergePolicy().SetUseCompoundDocStore(value_Renamed);
+               }
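+               // Illustrative use: disabling compound files trades more open file
+               // descriptors for somewhat faster indexing; valid only while the merge
+               // policy is a LogMergePolicy (otherwise these setters throw):
+               //
+               //   writer.SetUseCompoundFile(false);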
+               
+               /// <summary>Expert: Set the Similarity implementation used by this IndexWriter.
+               /// 
+               /// </summary>
+               /// <seealso cref="Similarity.SetDefault(Similarity)">
+               /// </seealso>
+               public virtual void  SetSimilarity(Similarity similarity)
+               {
+                       EnsureOpen();
+                       this.similarity = similarity;
+                       docWriter.SetSimilarity(similarity);
+               }
+               
+               /// <summary>Expert: Return the Similarity implementation used by this IndexWriter.
+               /// 
+               /// <p/>This defaults to the current value of {@link Similarity#GetDefault()}.
+               /// </summary>
+               public virtual Similarity GetSimilarity()
+               {
+                       EnsureOpen();
+                       return this.similarity;
+               }
+               
+               /// <summary>Expert: Set the interval between indexed terms.  Large values cause less
+               /// memory to be used by IndexReader, but slow random-access to terms.  Small
+               /// values cause more memory to be used by an IndexReader, and speed
+               /// random-access to terms.
+               /// 
+               /// This parameter determines the amount of computation required per query
+               /// term, regardless of the number of documents that contain that term.  In
+               /// particular, it is the maximum number of other terms that must be
+               /// scanned before a term is located and its frequency and position information
+               /// may be processed.  In a large index with user-entered query terms, query
+               /// processing time is likely to be dominated not by term lookup but rather
+               /// by the processing of frequency and positional data.  In a small index
+               /// or when many uncommon query terms are generated (e.g., by wildcard
+               /// queries) term lookup may become a dominant cost.
+               /// 
+               /// In particular, <code>numUniqueTerms/interval</code> terms are read into
+               /// memory by an IndexReader, and, on average, <code>interval/2</code> terms
+               /// must be scanned for each random term access.
+               /// 
+               /// </summary>
+               /// <seealso cref="DEFAULT_TERM_INDEX_INTERVAL">
+               /// </seealso>
+               public virtual void  SetTermIndexInterval(int interval)
+               {
+                       EnsureOpen();
+                       this.termIndexInterval = interval;
+               }
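+               // Worked example of the formulas above: with the default interval of 128,
+               // an index holding 10,000,000 unique terms keeps roughly
+               // 10,000,000 / 128 = 78,125 terms in reader memory, and a random term
+               // lookup scans on average 128 / 2 = 64 terms.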
+               
+               /// <summary>Expert: Return the interval between indexed terms.
+               /// 
+               /// </summary>
+               /// <seealso cref="SetTermIndexInterval(int)">
+               /// </seealso>
+               public virtual int GetTermIndexInterval()
+               {
+                       // We pass false because this method is called by SegmentMerger while we are in the process of closing
+                       EnsureOpen(false);
+                       return termIndexInterval;
+               }
+               
+               /// <summary> Constructs an IndexWriter for the index in <code>path</code>.
+               /// Text will be analyzed with <code>a</code>.  If <code>create</code>
+               /// is true, then a new, empty index will be created in
+               /// <code>path</code>, replacing the index already there,
+               /// if any.
+               /// 
+               /// <p/><b>NOTE</b>: autoCommit (see <a
+               /// href="#autoCommit">above</a>) is set to false with this
+               /// constructor.
+               /// 
+               /// </summary>
+               /// <param name="path">the path to the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="create"><code>true</code> to create the index or overwrite
+               /// the existing one; <code>false</code> to append to the existing
+               /// index
+               /// </param>
+               /// <param name="mfl">Maximum field length in number of tokens/terms: LIMITED, UNLIMITED, or user-specified
+               /// via the MaxFieldLength constructor.
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if the directory cannot be read/written to, or </throws>
+               /// <summary>  if it does not exist and <code>create</code> is
+               /// <code>false</code> or if there is any other low-level
+               /// IO error
+               /// </summary>
+               /// <deprecated> Use {@link #IndexWriter(Directory, Analyzer,
+               /// boolean, MaxFieldLength)}
+        /// </deprecated>
+        [Obsolete("Use IndexWriter(Directory, Analyzer,boolean, MaxFieldLength)")]
+        public IndexWriter(System.String path, Analyzer a, bool create, MaxFieldLength mfl)
+               {
+                       InitBlock();
+                       Init(FSDirectory.GetDirectory(path), a, create, true, null, false, mfl.GetLimit(), null, null);
+               }
+               
+               /// <summary> Constructs an IndexWriter for the index in <code>path</code>.
+               /// Text will be analyzed with <code>a</code>.  If <code>create</code>
+               /// is true, then a new, empty index will be created in
+               /// <code>path</code>, replacing the index already there, if any.
+               /// 
+               /// </summary>
+               /// <param name="path">the path to the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="create"><code>true</code> to create the index or overwrite
+               /// the existing one; <code>false</code> to append to the existing
+               /// index
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if the directory cannot be read/written to, or </throws>
+               /// <summary>  if it does not exist and <code>create</code> is
+               /// <code>false</code> or if there is any other low-level
+               /// IO error
+               /// </summary>
+               /// <deprecated> This constructor will be removed in the 3.0 release.
+               /// Use {@link
+               /// #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)}
+               /// instead, and call {@link #Commit()} when needed.
+               /// </deprecated>
+        [Obsolete("This constructor will be removed in the 3.0 release. Use IndexWriter(Directory,Analyzer,bool,MaxFieldLength) instead, and call Commit() when needed")]
+               public IndexWriter(System.String path, Analyzer a, bool create)
+               {
+                       InitBlock();
+                       Init(FSDirectory.GetDirectory(path), a, create, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
+               }
+               
+               /// <summary> Constructs an IndexWriter for the index in <code>path</code>.
+               /// Text will be analyzed with <code>a</code>.  If <code>create</code>
+               /// is true, then a new, empty index will be created in
+               /// <code>path</code>, replacing the index already there, if any.
+               /// 
+               /// <p/><b>NOTE</b>: autoCommit (see <a
+               /// href="#autoCommit">above</a>) is set to false with this
+               /// constructor.
+               /// 
+               /// </summary>
+               /// <param name="path">the path to the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="create"><code>true</code> to create the index or overwrite
+               /// the existing one; <code>false</code> to append to the existing
+               /// index
+               /// </param>
+               /// <param name="mfl">Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
+               /// via the MaxFieldLength constructor.
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if the directory cannot be read/written to, or </throws>
+               /// <summary>  if it does not exist and <code>create</code> is
+               /// <code>false</code> or if there is any other low-level
+               /// IO error
+               /// </summary>
+               /// <deprecated> Use {@link #IndexWriter(Directory,
+               /// Analyzer, boolean, MaxFieldLength)}
+               /// </deprecated>
+        [Obsolete("Use IndexWriter(Directory, Analyzer, boolean, MaxFieldLength)")]
+               public IndexWriter(System.IO.FileInfo path, Analyzer a, bool create, MaxFieldLength mfl)
+               {
+                       InitBlock();
+                       Init(FSDirectory.GetDirectory(path), a, create, true, null, false, mfl.GetLimit(), null, null);
+               }
+               
+               /// <summary> Constructs an IndexWriter for the index in <code>path</code>.
+               /// Text will be analyzed with <code>a</code>.  If <code>create</code>
+               /// is true, then a new, empty index will be created in
+               /// <code>path</code>, replacing the index already there, if any.
+               /// 
+               /// </summary>
+               /// <param name="path">the path to the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="create"><code>true</code> to create the index or overwrite
+               /// the existing one; <code>false</code> to append to the existing
+               /// index
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if the directory cannot be read/written to, or </throws>
+               /// <summary>  if it does not exist and <code>create</code> is
+               /// <code>false</code> or if there is any other low-level
+               /// IO error
+               /// </summary>
+               /// <deprecated> This constructor will be removed in the 3.0 release.
+               /// Use {@link
+               /// #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)}
+               /// instead, and call {@link #Commit()} when needed.
+               /// </deprecated>
+        [Obsolete("This constructor will be removed in the 3.0 release. Use IndexWriter(Directory,Analyzer,bool,MaxFieldLength) instead, and call Commit() when needed.")]
+               public IndexWriter(System.IO.FileInfo path, Analyzer a, bool create)
+               {
+                       InitBlock();
+                       Init(FSDirectory.GetDirectory(path), a, create, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
+               }
+               
+               /// <summary> Constructs an IndexWriter for the index in <code>d</code>.
+               /// Text will be analyzed with <code>a</code>.  If <code>create</code>
+               /// is true, then a new, empty index will be created in
+               /// <code>d</code>, replacing the index already there, if any.
+               /// 
+               /// <p/><b>NOTE</b>: autoCommit (see <a
+               /// href="#autoCommit">above</a>) is set to false with this
+               /// constructor.
+               /// 
+               /// </summary>
+               /// <param name="d">the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="create"><code>true</code> to create the index or overwrite
+               /// the existing one; <code>false</code> to append to the existing
+               /// index
+               /// </param>
+               /// <param name="mfl">Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
+               /// via the MaxFieldLength constructor.
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if the directory cannot be read/written to, or </throws>
+               /// <summary>  if it does not exist and <code>create</code> is
+               /// <code>false</code> or if there is any other low-level
+               /// IO error
+               /// </summary>
+               public IndexWriter(Directory d, Analyzer a, bool create, MaxFieldLength mfl)
+               {
+                       InitBlock();
+                       Init(d, a, create, false, null, false, mfl.GetLimit(), null, null);
+               }
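+               // Illustrative use of this (non-deprecated) constructor; the analyzer
+               // choice and index path are assumptions:
+               //
+               //   Directory dir = FSDirectory.GetDirectory("/path/to/index");
+               //   IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true,
+               //                                        IndexWriter.MaxFieldLength.UNLIMITED);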
+               
+               /// <summary> Constructs an IndexWriter for the index in <code>d</code>.
+               /// Text will be analyzed with <code>a</code>.  If <code>create</code>
+               /// is true, then a new, empty index will be created in
+               /// <code>d</code>, replacing the index already there, if any.
+               /// 
+               /// </summary>
+               /// <param name="d">the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="create"><code>true</code> to create the index or overwrite
+               /// the existing one; <code>false</code> to append to the existing
+               /// index
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if the directory cannot be read/written to, or </throws>
+               /// <summary>  if it does not exist and <code>create</code> is
+               /// <code>false</code> or if there is any other low-level
+               /// IO error
+               /// </summary>
+               /// <deprecated> This constructor will be removed in the 3.0
+               /// release.  Use {@link
+               /// #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)}
+               /// instead, and call {@link #Commit()} when needed.
+               /// </deprecated>
+        [Obsolete("This constructor will be removed in the 3.0 release. Use IndexWriter(Directory,Analyzer,bool,MaxFieldLength) instead, and call Commit() when needed.")]
+               public IndexWriter(Directory d, Analyzer a, bool create)
+               {
+                       InitBlock();
+                       Init(d, a, create, false, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
+               }
+               
+               /// <summary> Constructs an IndexWriter for the index in
+               /// <code>path</code>, first creating it if it does not
+               /// already exist.  Text will be analyzed with
+               /// <code>a</code>.
+               /// 
+               /// <p/><b>NOTE</b>: autoCommit (see <a
+               /// href="#autoCommit">above</a>) is set to false with this
+               /// constructor.
+               /// 
+               /// </summary>
+               /// <param name="path">the path to the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="mfl">Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
+               /// via the MaxFieldLength constructor.
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer </throws>
+               /// <summary>  has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </summary>
+               /// <throws>  IOException if the directory cannot be </throws>
+               /// <summary>  read/written to or if there is any other low-level
+               /// IO error
+               /// </summary>
+               /// <deprecated> Use {@link #IndexWriter(Directory, Analyzer, MaxFieldLength)}
+               /// </deprecated>
+        [Obsolete("Use IndexWriter(Directory, Analyzer, MaxFieldLength)")]
+               public IndexWriter(System.String path, Analyzer a, MaxFieldLength mfl)
+               {
+                       InitBlock();
+                       Init(FSDirectory.GetDirectory(path), a, true, null, false, mfl.GetLimit(), null, null);
+               }
+               
+               /// <summary> Constructs an IndexWriter for the index in
+               /// <code>path</code>, first creating it if it does not
+               /// already exist.  Text will be analyzed with
+               /// <code>a</code>.
+               /// 
+               /// </summary>
+               /// <param name="path">the path to the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer
+               /// has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </throws>
+               /// <throws>  IOException if the directory cannot be
+               /// read/written to or if there is any other low-level
+               /// IO error
+               /// </throws>
+               /// <deprecated> This constructor will be removed in the 3.0 release.
+               /// Use {@link #IndexWriter(Directory,Analyzer,MaxFieldLength)}
+               /// instead, and call {@link #Commit()} when needed.
+               /// </deprecated>
+        [Obsolete("This constructor will be removed in the 3.0 release. Use IndexWriter(Directory,Analyzer,MaxFieldLength) instead, and call Commit() when needed.")]
+               public IndexWriter(System.String path, Analyzer a)
+               {
+                       InitBlock();
+                       Init(FSDirectory.GetDirectory(path), a, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
+               }
+               
+               /// <summary> Constructs an IndexWriter for the index in
+               /// <code>path</code>, first creating it if it does not
+               /// already exist.  Text will be analyzed with
+               /// <code>a</code>.
+               /// 
+               /// <p/><b>NOTE</b>: autoCommit (see <a
+               /// href="#autoCommit">above</a>) is set to false with this
+               /// constructor.
+               /// 
+               /// </summary>
+               /// <param name="path">the path to the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="mfl">Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
+               /// via the MaxFieldLength constructor.
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer
+               /// has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </throws>
+               /// <throws>  IOException if the directory cannot be
+               /// read/written to or if there is any other low-level
+               /// IO error
+               /// </throws>
+               /// <deprecated> Use {@link #IndexWriter(Directory,
+               /// Analyzer, MaxFieldLength)}
+               /// </deprecated>
+        [Obsolete("Use IndexWriter(Directory, Analyzer, MaxFieldLength)")]
+               public IndexWriter(System.IO.FileInfo path, Analyzer a, MaxFieldLength mfl)
+               {
+                       InitBlock();
+                       Init(FSDirectory.GetDirectory(path), a, true, null, false, mfl.GetLimit(), null, null);
+               }
+               
+               /// <summary> Constructs an IndexWriter for the index in
+               /// <code>path</code>, first creating it if it does not
+               /// already exist.  Text will be analyzed with
+               /// <code>a</code>.
+               /// 
+               /// </summary>
+               /// <param name="path">the path to the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer
+               /// has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </throws>
+               /// <throws>  IOException if the directory cannot be
+               /// read/written to or if there is any other low-level
+               /// IO error
+               /// </throws>
+               /// <deprecated> This constructor will be removed in the 3.0 release.
+               /// Use {@link #IndexWriter(Directory,Analyzer,MaxFieldLength)}
+               /// instead, and call {@link #Commit()} when needed.
+               /// </deprecated>
+        [Obsolete("This constructor will be removed in the 3.0 release. Use IndexWriter(Directory,Analyzer,MaxFieldLength) instead, and call Commit() when needed.")]
+               public IndexWriter(System.IO.FileInfo path, Analyzer a)
+               {
+                       InitBlock();
+                       Init(FSDirectory.GetDirectory(path), a, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
+               }
+               
+               /// <summary> Constructs an IndexWriter for the index in
+               /// <code>d</code>, first creating it if it does not
+               /// already exist.  Text will be analyzed with
+               /// <code>a</code>.
+               /// 
+               /// <p/><b>NOTE</b>: autoCommit (see <a
+               /// href="#autoCommit">above</a>) is set to false with this
+               /// constructor.
+               /// 
+               /// </summary>
+               /// <param name="d">the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="mfl">Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
+               /// via the MaxFieldLength constructor.
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer
+               /// has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </throws>
+               /// <throws>  IOException if the directory cannot be
+               /// read/written to or if there is any other low-level
+               /// IO error
+               /// </throws>
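+               /// <example> A minimal usage sketch; the index path and the choice of
+               /// StandardAnalyzer are illustrative assumptions, not requirements:
+               /// <code>
+               /// Directory dir = FSDirectory.GetDirectory("/tmp/myindex"); // hypothetical path
+               /// IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+               /// Document doc = new Document();
+               /// doc.Add(new Field("contents", "some text", Field.Store.YES, Field.Index.ANALYZED));
+               /// writer.AddDocument(doc);
+               /// writer.Close();
+               /// </code>
+               /// </example>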
+               public IndexWriter(Directory d, Analyzer a, MaxFieldLength mfl)
+               {
+                       InitBlock();
+                       Init(d, a, false, null, false, mfl.GetLimit(), null, null);
+               }
+               
+               /// <summary> Constructs an IndexWriter for the index in
+               /// <code>d</code>, first creating it if it does not
+               /// already exist.  Text will be analyzed with
+               /// <code>a</code>.
+               /// 
+               /// </summary>
+               /// <param name="d">the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer
+               /// has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </throws>
+               /// <throws>  IOException if the directory cannot be
+               /// read/written to or if there is any other low-level
+               /// IO error
+               /// </throws>
+               /// <deprecated> This constructor will be removed in the 3.0 release.
+               /// Use {@link
+               /// #IndexWriter(Directory,Analyzer,MaxFieldLength)}
+               /// instead, and call {@link #Commit()} when needed.
+               /// </deprecated>
+        [Obsolete("This constructor will be removed in the 3.0 release. Use IndexWriter(Directory,Analyzer,MaxFieldLength) instead, and call Commit() when needed.")]
+               public IndexWriter(Directory d, Analyzer a)
+               {
+                       InitBlock();
+                       Init(d, a, false, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null);
+               }
+               
+               /// <summary> Constructs an IndexWriter for the index in
+               /// <code>d</code>, first creating it if it does not
+               /// already exist.  Text will be analyzed with
+               /// <code>a</code>.
+               /// 
+               /// </summary>
+               /// <param name="d">the index directory
+               /// </param>
+               /// <param name="autoCommit">see <a href="#autoCommit">above</a>
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer
+               /// has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </throws>
+               /// <throws>  IOException if the directory cannot be
+               /// read/written to or if there is any other low-level
+               /// IO error
+               /// </throws>
+               /// <deprecated> This constructor will be removed in the 3.0 release.
+               /// Use {@link
+               /// #IndexWriter(Directory,Analyzer,MaxFieldLength)}
+               /// instead, and call {@link #Commit()} when needed.
+               /// </deprecated>
+        [Obsolete("This constructor will be removed in the 3.0 release. Use IndexWriter(Directory,Analyzer,MaxFieldLength) instead, and call Commit() when needed.")]
+               public IndexWriter(Directory d, bool autoCommit, Analyzer a)
+               {
+                       InitBlock();
+                       Init(d, a, false, null, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null);
+               }
+               
+               /// <summary> Constructs an IndexWriter for the index in <code>d</code>.
+               /// Text will be analyzed with <code>a</code>.  If <code>create</code>
+               /// is true, then a new, empty index will be created in
+               /// <code>d</code>, replacing the index already there, if any.
+               /// 
+               /// </summary>
+               /// <param name="d">the index directory
+               /// </param>
+               /// <param name="autoCommit">see <a href="#autoCommit">above</a>
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="create"><code>true</code> to create the index or overwrite
+               /// the existing one; <code>false</code> to append to the existing
+               /// index
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer
+               /// has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </throws>
+               /// <throws>  IOException if the directory cannot be read/written to, or
+               /// if it does not exist and <code>create</code> is
+               /// <code>false</code> or if there is any other low-level
+               /// IO error
+               /// </throws>
+               /// <deprecated> This constructor will be removed in the 3.0 release.
+               /// Use {@link
+               /// #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)}
+               /// instead, and call {@link #Commit()} when needed.
+               /// </deprecated>
+        [Obsolete("This constructor will be removed in the 3.0 release. Use IndexWriter(Directory,Analyzer,boolean,MaxFieldLength) instead, and call Commit() when needed.")]
+               public IndexWriter(Directory d, bool autoCommit, Analyzer a, bool create)
+               {
+                       InitBlock();
+                       Init(d, a, create, false, null, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null);
+               }
+               
+               /// <summary> Expert: constructs an IndexWriter with a custom {@link
+               /// IndexDeletionPolicy}, for the index in <code>d</code>,
+               /// first creating it if it does not already exist.  Text
+               /// will be analyzed with <code>a</code>.
+               /// 
+               /// <p/><b>NOTE</b>: autoCommit (see <a
+               /// href="#autoCommit">above</a>) is set to false with this
+               /// constructor.
+               /// 
+               /// </summary>
+               /// <param name="d">the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="deletionPolicy">see <a href="#deletionPolicy">above</a>
+               /// </param>
+               /// <param name="mfl">whether or not to limit field lengths
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer
+               /// has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </throws>
+               /// <throws>  IOException if the directory cannot be
+               /// read/written to or if there is any other low-level
+               /// IO error
+               /// </throws>
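+               /// <example> A sketch that passes the default policy explicitly
+               /// (<code>dir</code> is a placeholder Directory):
+               /// <code>
+               /// IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(),
+               ///     new KeepOnlyLastCommitDeletionPolicy(), IndexWriter.MaxFieldLength.UNLIMITED);
+               /// </code>
+               /// </example>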
+               public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
+               {
+                       InitBlock();
+                       Init(d, a, false, deletionPolicy, false, mfl.GetLimit(), null, null);
+               }
+               
+               /// <summary> Expert: constructs an IndexWriter with a custom {@link
+               /// IndexDeletionPolicy}, for the index in <code>d</code>,
+               /// first creating it if it does not already exist.  Text
+               /// will be analyzed with <code>a</code>.
+               /// 
+               /// </summary>
+               /// <param name="d">the index directory
+               /// </param>
+               /// <param name="autoCommit">see <a href="#autoCommit">above</a>
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="deletionPolicy">see <a href="#deletionPolicy">above</a>
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer
+               /// has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </throws>
+               /// <throws>  IOException if the directory cannot be
+               /// read/written to or if there is any other low-level
+               /// IO error
+               /// </throws>
+               /// <deprecated> This constructor will be removed in the 3.0 release.
+               /// Use {@link
+               /// #IndexWriter(Directory,Analyzer,IndexDeletionPolicy,MaxFieldLength)}
+               /// instead, and call {@link #Commit()} when needed.
+               /// </deprecated>
+        [Obsolete("This constructor will be removed in the 3.0 release. Use IndexWriter(Directory,Analyzer,IndexDeletionPolicy,MaxFieldLength) instead, and call Commit() when needed.")]
+               public IndexWriter(Directory d, bool autoCommit, Analyzer a, IndexDeletionPolicy deletionPolicy)
+               {
+                       InitBlock();
+                       Init(d, a, false, deletionPolicy, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null);
+               }
+               
+               /// <summary> Expert: constructs an IndexWriter with a custom {@link
+               /// IndexDeletionPolicy}, for the index in <code>d</code>.
+               /// Text will be analyzed with <code>a</code>.  If
+               /// <code>create</code> is true, then a new, empty index
+               /// will be created in <code>d</code>, replacing the index
+               /// already there, if any.
+               /// 
+               /// <p/><b>NOTE</b>: autoCommit (see <a
+               /// href="#autoCommit">above</a>) is set to false with this
+               /// constructor.
+               /// 
+               /// </summary>
+               /// <param name="d">the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="create"><code>true</code> to create the index or overwrite
+               /// the existing one; <code>false</code> to append to the existing
+               /// index
+               /// </param>
+               /// <param name="deletionPolicy">see <a href="#deletionPolicy">above</a>
+               /// </param>
+               /// <param name="mfl">{@link Mono.Lucene.Net.Index.IndexWriter.MaxFieldLength}, whether or not to limit field lengths.  Value is in number of terms/tokens
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer
+               /// has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </throws>
+               /// <throws>  IOException if the directory cannot be read/written to, or
+               /// if it does not exist and <code>create</code> is
+               /// <code>false</code> or if there is any other low-level
+               /// IO error
+               /// </throws>
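+               /// <example> A sketch that rebuilds the index from scratch, overwriting
+               /// whatever <code>dir</code> (a placeholder Directory) already holds:
+               /// <code>
+               /// IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(),
+               ///     true, new KeepOnlyLastCommitDeletionPolicy(), IndexWriter.MaxFieldLength.LIMITED);
+               /// </code>
+               /// </example>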
+               public IndexWriter(Directory d, Analyzer a, bool create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
+               {
+                       InitBlock();
+                       Init(d, a, create, false, deletionPolicy, false, mfl.GetLimit(), null, null);
+               }
+               
+               /// <summary> Expert: constructs an IndexWriter with a custom {@link
+               /// IndexDeletionPolicy} and {@link IndexingChain}, 
+               /// for the index in <code>d</code>.
+               /// Text will be analyzed with <code>a</code>.  If
+               /// <code>create</code> is true, then a new, empty index
+               /// will be created in <code>d</code>, replacing the index
+               /// already there, if any.
+               /// 
+               /// <p/><b>NOTE</b>: autoCommit (see <a
+               /// href="#autoCommit">above</a>) is set to false with this
+               /// constructor.
+               /// 
+               /// </summary>
+               /// <param name="d">the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="create"><code>true</code> to create the index or overwrite
+               /// the existing one; <code>false</code> to append to the existing
+               /// index
+               /// </param>
+               /// <param name="deletionPolicy">see <a href="#deletionPolicy">above</a>
+               /// </param>
+               /// <param name="mfl">whether or not to limit field lengths, value is in number of terms/tokens.  See {@link Mono.Lucene.Net.Index.IndexWriter.MaxFieldLength}.
+               /// </param>
+               /// <param name="indexingChain">the {@link DocConsumer} chain to be used to 
+               /// process documents
+               /// </param>
+               /// <param name="commit">which commit to open
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer
+               /// has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </throws>
+               /// <throws>  IOException if the directory cannot be read/written to, or
+               /// if it does not exist and <code>create</code> is
+               /// <code>false</code> or if there is any other low-level
+               /// IO error
+               /// </throws>
+               internal IndexWriter(Directory d, Analyzer a, bool create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl, IndexingChain indexingChain, IndexCommit commit)
+               {
+                       InitBlock();
+                       Init(d, a, create, false, deletionPolicy, false, mfl.GetLimit(), indexingChain, commit);
+               }
+               
+               /// <summary> Expert: constructs an IndexWriter with a custom {@link
+               /// IndexDeletionPolicy}, for the index in <code>d</code>.
+               /// Text will be analyzed with <code>a</code>.  If
+               /// <code>create</code> is true, then a new, empty index
+               /// will be created in <code>d</code>, replacing the index
+               /// already there, if any.
+               /// 
+               /// </summary>
+               /// <param name="d">the index directory
+               /// </param>
+               /// <param name="autoCommit">see <a href="#autoCommit">above</a>
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="create"><code>true</code> to create the index or overwrite
+               /// the existing one; <code>false</code> to append to the existing
+               /// index
+               /// </param>
+               /// <param name="deletionPolicy">see <a href="#deletionPolicy">above</a>
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer
+               /// has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </throws>
+               /// <throws>  IOException if the directory cannot be read/written to, or
+               /// if it does not exist and <code>create</code> is
+               /// <code>false</code> or if there is any other low-level
+               /// IO error
+               /// </throws>
+               /// <deprecated> This constructor will be removed in the 3.0 release.
+               /// Use {@link
+               /// #IndexWriter(Directory,Analyzer,boolean,IndexDeletionPolicy,MaxFieldLength)}
+               /// instead, and call {@link #Commit()} when needed.
+               /// </deprecated>
+        [Obsolete("This constructor will be removed in the 3.0 release. Use IndexWriter(Directory,Analyzer,boolean,IndexDeletionPolicy,MaxFieldLength) instead, and call Commit() when needed.")]
+               public IndexWriter(Directory d, bool autoCommit, Analyzer a, bool create, IndexDeletionPolicy deletionPolicy)
+               {
+                       InitBlock();
+                       Init(d, a, create, false, deletionPolicy, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null);
+               }
+               
+               /// <summary> Expert: constructs an IndexWriter on specific commit
+               /// point, with a custom {@link IndexDeletionPolicy}, for
+               /// the index in <code>d</code>.  Text will be analyzed
+               /// with <code>a</code>.
+               /// 
+               /// <p/> This is only meaningful if you've used a {@link
+               /// IndexDeletionPolicy} in the past that keeps more than
+               /// just the last commit.
+               /// 
+               /// <p/>This operation is similar to {@link #Rollback()},
+               /// except that method can only rollback what's been done
+               /// with the current instance of IndexWriter since its last
+               /// commit, whereas this method can rollback to an
+               /// arbitrary commit point from the past, assuming the
+               /// {@link IndexDeletionPolicy} has preserved past
+               /// commits.
+               /// 
+               /// <p/><b>NOTE</b>: autoCommit (see <a
+               /// href="#autoCommit">above</a>) is set to false with this
+               /// constructor.
+               /// 
+               /// </summary>
+               /// <param name="d">the index directory
+               /// </param>
+               /// <param name="a">the analyzer to use
+               /// </param>
+               /// <param name="deletionPolicy">see <a href="#deletionPolicy">above</a>
+               /// </param>
+               /// <param name="mfl">whether or not to limit field lengths, value is in number of terms/tokens.  See {@link Mono.Lucene.Net.Index.IndexWriter.MaxFieldLength}.
+               /// </param>
+               /// <param name="commit">which commit to open
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  LockObtainFailedException if another writer
+               /// has this index open (<code>write.lock</code> could not
+               /// be obtained)
+               /// </throws>
+               /// <throws>  IOException if the directory cannot be read/written to, or
+               /// if it does not exist and <code>create</code> is
+               /// <code>false</code> or if there is any other low-level
+               /// IO error
+               /// </throws>
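+               /// <example> A sketch of reopening at an older commit point; it assumes
+               /// <code>dir</code>, a deletion policy <code>policy</code> that keeps
+               /// past commits, and a <code>past</code> commit picked earlier (for
+               /// instance from IndexReader.ListCommits(dir)):
+               /// <code>
+               /// IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(),
+               ///     policy, IndexWriter.MaxFieldLength.UNLIMITED, past);
+               /// </code>
+               /// </example>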
+               public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl, IndexCommit commit)
+               {
+                       InitBlock();
+                       Init(d, a, false, false, deletionPolicy, false, mfl.GetLimit(), null, commit);
+               }
+               
+               private void  Init(Directory d, Analyzer a, bool closeDir, IndexDeletionPolicy deletionPolicy, bool autoCommit, int maxFieldLength, IndexingChain indexingChain, IndexCommit commit)
+               {
+                       if (IndexReader.IndexExists(d))
+                       {
+                               Init(d, a, false, closeDir, deletionPolicy, autoCommit, maxFieldLength, indexingChain, commit);
+                       }
+                       else
+                       {
+                               Init(d, a, true, closeDir, deletionPolicy, autoCommit, maxFieldLength, indexingChain, commit);
+                       }
+               }
+               
+               private void  Init(Directory d, Analyzer a, bool create, bool closeDir, IndexDeletionPolicy deletionPolicy, bool autoCommit, int maxFieldLength, IndexingChain indexingChain, IndexCommit commit)
+               {
+                       this.closeDir = closeDir;
+                       directory = d;
+                       analyzer = a;
+                       SetMessageID(defaultInfoStream);
+                       this.maxFieldLength = maxFieldLength;
+                       
+                       if (indexingChain == null)
+                               indexingChain = DocumentsWriter.DefaultIndexingChain;
+                       
+                       if (create)
+                       {
+                               // Clear the write lock in case it's leftover:
+                               directory.ClearLock(WRITE_LOCK_NAME);
+                       }
+                       
+                       Lock writeLock = directory.MakeLock(WRITE_LOCK_NAME);
+                       if (!writeLock.Obtain(writeLockTimeout))
+                       // obtain write lock
+                       {
+                               throw new LockObtainFailedException("Index locked for write: " + writeLock);
+                       }
+                       this.writeLock = writeLock; // save it
+
+            bool success = false;
+                       try
+                       {
+                               if (create)
+                               {
+                                       // Try to read first.  This is to allow create
+                                       // against an index that's currently open for
+                                       // searching.  In this case we write the next
+                                       // segments_N file with no segments:
+                                       bool doCommit;
+                                       try
+                                       {
+                                               segmentInfos.Read(directory);
+                                               segmentInfos.Clear();
+                                               doCommit = false;
+                                       }
+                                       catch (System.IO.IOException e)
+                                       {
+                                               // Likely this means it's a fresh directory
+                                               doCommit = true;
+                                       }
+                                       
+                                       if (autoCommit || doCommit)
+                                       {
+                                               // Always commit if autoCommit=true, else only
+                                               // commit if there is no segments file in this dir
+                                               // already.
+                                               segmentInfos.Commit(directory);
+                                               SupportClass.CollectionsHelper.AddAllIfNotContains(synced, segmentInfos.Files(directory, true));
+                                       }
+                                       else
+                                       {
+                                               // Record that we have a change (zero out all
+                                               // segments) pending:
+                                               changeCount++;
+                                       }
+                               }
+                               else
+                               {
+                                       segmentInfos.Read(directory);
+                                       
+                                       if (commit != null)
+                                       {
+                                               // Swap out all segments, but, keep metadata in
+                                               // SegmentInfos, like version & generation, to
+                                               // preserve write-once.  This is important if
+                                               // readers are open against the future commit
+                                               // points.
+                                               if (commit.GetDirectory() != directory)
+                                                       throw new System.ArgumentException("IndexCommit's directory doesn't match my directory");
+                                               SegmentInfos oldInfos = new SegmentInfos();
+                                               oldInfos.Read(directory, commit.GetSegmentsFileName());
+                                               segmentInfos.Replace(oldInfos);
+                                               changeCount++;
+                                               if (infoStream != null)
+                                                       Message("init: loaded commit \"" + commit.GetSegmentsFileName() + "\"");
+                                       }
+                                       
+                                       // We assume that this segments_N was previously
+                                       // properly sync'd:
+                                       SupportClass.CollectionsHelper.AddAllIfNotContains(synced, segmentInfos.Files(directory, true));
+                               }
+                               
+                               this.autoCommit = autoCommit;
+                               SetRollbackSegmentInfos(segmentInfos);
+                               
+                               docWriter = new DocumentsWriter(directory, this, indexingChain);
+                               docWriter.SetInfoStream(infoStream);
+                               docWriter.SetMaxFieldLength(maxFieldLength);
+                               
+                               // Default deleter (for backwards compatibility) is
+                               // KeepOnlyLastCommitDeleter:
+                               deleter = new IndexFileDeleter(directory, deletionPolicy == null?new KeepOnlyLastCommitDeletionPolicy():deletionPolicy, segmentInfos, infoStream, docWriter,synced);
+                               
+                               if (deleter.startingCommitDeleted)
+                               // Deletion policy deleted the "head" commit point.
+                               // We have to mark ourself as changed so that if we
+                               // are closed w/o any further changes we write a new
+                               // segments_N file.
+                                       changeCount++;
+                               
+                               PushMaxBufferedDocs();
+                               
+                               if (infoStream != null)
+                               {
+                                       Message("init: create=" + create);
+                                       MessageState();
+                               }
+
+                success = true;
+                       }
+                       finally
+                       {
+                if (!success)
+                {
+                    if (infoStream != null)
+                    {
+                        Message("init: hit exception on init; releasing write lock");
+                    }
+                    try
+                    {
+                        writeLock.Release();
+                    }
+                    catch (Exception t)
+                    {
+                        // don't mask the original exception
+                    }
+                    writeLock = null;
+                }
+                       }
+               }
+               
+               private void  SetRollbackSegmentInfos(SegmentInfos infos)
+               {
+                       lock (this)
+                       {
+                               rollbackSegmentInfos = (SegmentInfos) infos.Clone();
+                               System.Diagnostics.Debug.Assert(!rollbackSegmentInfos.HasExternalSegments(directory));
+                               rollbackSegments = new System.Collections.Hashtable();
+                               int size = rollbackSegmentInfos.Count;
+                               for (int i = 0; i < size; i++)
+                                       rollbackSegments[rollbackSegmentInfos.Info(i)] = (System.Int32) i;
+                       }
+               }
+               
+               /// <summary> Expert: set the merge policy used by this writer.</summary>
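+               /// <example> A sketch, assuming the 2.9-era LogDocMergePolicy
+               /// constructor that takes the writer:
+               /// <code>writer.SetMergePolicy(new LogDocMergePolicy(writer));</code>
+               /// </example>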
+               public virtual void  SetMergePolicy(MergePolicy mp)
+               {
+                       EnsureOpen();
+                       if (mp == null)
+                               throw new System.NullReferenceException("MergePolicy must be non-null");
+                       
+                       if (mergePolicy != mp)
+                               mergePolicy.Close();
+                       mergePolicy = mp;
+                       PushMaxBufferedDocs();
+                       if (infoStream != null)
+                       {
+                               Message("setMergePolicy " + mp);
+                       }
+               }
+               
+               /// <summary> Expert: returns the current MergePolicy in use by this writer.</summary>
+               /// <seealso cref="setMergePolicy">
+               /// </seealso>
+               public virtual MergePolicy GetMergePolicy()
+               {
+                       EnsureOpen();
+                       return mergePolicy;
+               }
+               
+               /// <summary> Expert: set the merge scheduler used by this writer.</summary>
+               public virtual void  SetMergeScheduler(MergeScheduler mergeScheduler)
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               if (mergeScheduler == null)
+                                       throw new System.NullReferenceException("MergeScheduler must be non-null");
+                               
+                               if (this.mergeScheduler != mergeScheduler)
+                               {
+                                       FinishMerges(true);
+                                       this.mergeScheduler.Close();
+                               }
+                               this.mergeScheduler = mergeScheduler;
+                               if (infoStream != null)
+                               {
+                                       Message("setMergeScheduler " + mergeScheduler);
+                               }
+                       }
+               }
+               
+               /// <summary> Expert: returns the current MergeScheduler in use by this
+               /// writer.
+               /// </summary>
+               /// <seealso cref="setMergeScheduler">
+               /// </seealso>
+               public virtual MergeScheduler GetMergeScheduler()
+               {
+                       EnsureOpen();
+                       return mergeScheduler;
+               }
+               
+               /// <summary><p/>Determines the largest segment (measured by
+               /// document count) that may be merged with other segments.
+               /// Small values (e.g., less than 10,000) are best for
+               /// interactive indexing, as this limits the length of
+               /// pauses while indexing to a few seconds.  Larger values
+               /// are best for batched indexing and speedier
+               /// searches.<p/>
+               /// 
+               /// <p/>The default value is {@link Integer#MAX_VALUE}.<p/>
+               /// 
+               /// <p/>Note that this method is a convenience method: it
+               /// just calls mergePolicy.setMaxMergeDocs as long as
+               /// mergePolicy is an instance of {@link LogMergePolicy}.
+               /// Otherwise an IllegalArgumentException is thrown.<p/>
+               /// 
+               /// <p/>The default merge policy ({@link
+               /// LogByteSizeMergePolicy}) also allows you to set this
+               /// limit by net size (in MB) of the segment, using {@link
+               /// LogByteSizeMergePolicy#setMaxMergeMB}.<p/>
+               /// </summary>
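+               /// <example> A sketch for an interactively maintained index; 10,000
+               /// is an illustrative value, not a recommendation:
+               /// <code>writer.SetMaxMergeDocs(10000);</code>
+               /// </example>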
+               public virtual void  SetMaxMergeDocs(int maxMergeDocs)
+               {
+                       GetLogMergePolicy().SetMaxMergeDocs(maxMergeDocs);
+               }
+               
+               /// <summary> <p/>Returns the largest segment (measured by document
+               /// count) that may be merged with other segments.<p/>
+               /// 
+               /// <p/>Note that this method is a convenience method: it
+               /// just calls mergePolicy.getMaxMergeDocs as long as
+               /// mergePolicy is an instance of {@link LogMergePolicy}.
+               /// Otherwise an IllegalArgumentException is thrown.<p/>
+               /// 
+               /// </summary>
+               /// <seealso cref="setMaxMergeDocs">
+               /// </seealso>
+               public virtual int GetMaxMergeDocs()
+               {
+                       return GetLogMergePolicy().GetMaxMergeDocs();
+               }
+               
+               /// <summary> The maximum number of terms that will be indexed for a single field in a
+               /// document.  This limits the amount of memory required for indexing, so that
+               /// collections with very large files will not crash the indexing process by
+               /// running out of memory.  This setting refers to the number of running terms,
+               /// not to the number of different terms.<p/>
+               /// <strong>Note:</strong> this silently truncates large documents, excluding from the
+               /// index all terms that occur further in the document.  If you know your source
+               /// documents are large, be sure to set this value high enough to accommodate
+               /// the expected size.  If you set it to Integer.MAX_VALUE, then the only limit
+               /// is your memory, but you should anticipate an OutOfMemoryError.<p/>
+               /// By default, no more than {@link #DEFAULT_MAX_FIELD_LENGTH} terms
+               /// will be indexed for a field.
+               /// </summary>
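+               /// <example> A sketch that removes the truncation limit entirely, at
+               /// the cost of memory (int.MaxValue stands in for Integer.MAX_VALUE):
+               /// <code>writer.SetMaxFieldLength(int.MaxValue);</code>
+               /// </example>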
+               public virtual void  SetMaxFieldLength(int maxFieldLength)
+               {
+                       EnsureOpen();
+                       this.maxFieldLength = maxFieldLength;
+                       docWriter.SetMaxFieldLength(maxFieldLength);
+                       if (infoStream != null)
+                               Message("setMaxFieldLength " + maxFieldLength);
+               }
+               
+               /// <summary> Returns the maximum number of terms that will be
+               /// indexed for a single field in a document.
+               /// </summary>
+               /// <seealso cref="setMaxFieldLength">
+               /// </seealso>
+               public virtual int GetMaxFieldLength()
+               {
+                       EnsureOpen();
+                       return maxFieldLength;
+               }
+
+        /// <summary> Sets the termsIndexDivisor passed to any readers that
+        /// IndexWriter opens, for example when applying deletes
+        /// or creating a near-real-time reader in {@link
+        /// IndexWriter#getReader}.  Default value is {@link
+        /// IndexReader#DEFAULT_TERMS_INDEX_DIVISOR}.
+        /// </summary>
+        public void SetReaderTermsIndexDivisor(int divisor)
+        {
+            EnsureOpen();
+            if (divisor <= 0)
+            {
+                throw new System.ArgumentException("divisor must be >= 1 (got " + divisor + ")");
+            }
+            readerTermsIndexDivisor = divisor;
+            if (infoStream != null)
+            {
+                Message("setReaderTermsIndexDivisor " + readerTermsIndexDivisor);
+            }
+        }
+
+        /// <seealso cref="setReaderTermsIndexDivisor">
+        /// </seealso>
+        public int GetReaderTermsIndexDivisor()
+        {
+            EnsureOpen();
+            return readerTermsIndexDivisor;
+        }
+               
+               /// <summary>Determines the minimal number of documents required
+               /// before the buffered in-memory documents are flushed as
+               /// a new Segment.  Large values generally give faster
+               /// indexing.
+               /// 
+               /// <p/>When this is set, the writer will flush every
+               /// maxBufferedDocs added documents.  Pass in {@link
+               /// #DISABLE_AUTO_FLUSH} to prevent triggering a flush due
+               /// to number of buffered documents.  Note that if flushing
+               /// by RAM usage is also enabled, then the flush will be
+               /// triggered by whichever comes first.<p/>
+               /// 
+               /// <p/>Disabled by default (writer flushes by RAM usage).<p/>
+               /// 
+               /// </summary>
+               /// <throws>  IllegalArgumentException if maxBufferedDocs is
+               /// enabled but smaller than 2, or it disables maxBufferedDocs
+               /// when ramBufferSize is already disabled
+               /// </throws>
+               /// <seealso cref="setRAMBufferSizeMB">
+               /// </seealso>
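+               /// <example> A sketch that flushes by document count only; 1000 is an
+               /// illustrative value.  The doc-count trigger is enabled before the
+               /// RAM trigger is disabled so that one trigger is always active:
+               /// <code>
+               /// writer.SetMaxBufferedDocs(1000);
+               /// writer.SetRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
+               /// </code>
+               /// </example>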
+               public virtual void  SetMaxBufferedDocs(int maxBufferedDocs)
+               {
+                       EnsureOpen();
+                       if (maxBufferedDocs != DISABLE_AUTO_FLUSH && maxBufferedDocs < 2)
+                               throw new System.ArgumentException("maxBufferedDocs must at least be 2 when enabled");
+                       if (maxBufferedDocs == DISABLE_AUTO_FLUSH && GetRAMBufferSizeMB() == DISABLE_AUTO_FLUSH)
+                               throw new System.ArgumentException("at least one of ramBufferSize and maxBufferedDocs must be enabled");
+                       docWriter.SetMaxBufferedDocs(maxBufferedDocs);
+                       PushMaxBufferedDocs();
+                       if (infoStream != null)
+                               Message("setMaxBufferedDocs " + maxBufferedDocs);
+               }
+               
+               /// <summary> If we are flushing by doc count (not by RAM usage), and
+               /// using LogDocMergePolicy then push maxBufferedDocs down
+               /// as its minMergeDocs, to keep backwards compatibility.
+               /// </summary>
+               private void  PushMaxBufferedDocs()
+               {
+                       if (docWriter.GetMaxBufferedDocs() != DISABLE_AUTO_FLUSH)
+                       {
+                               MergePolicy mp = mergePolicy;
+                               if (mp is LogDocMergePolicy)
+                               {
+                                       LogDocMergePolicy lmp = (LogDocMergePolicy) mp;
+                                       int maxBufferedDocs = docWriter.GetMaxBufferedDocs();
+                                       if (lmp.GetMinMergeDocs() != maxBufferedDocs)
+                                       {
+                                               if (infoStream != null)
+                                                       Message("now push maxBufferedDocs " + maxBufferedDocs + " to LogDocMergePolicy");
+                                               lmp.SetMinMergeDocs(maxBufferedDocs);
+                                       }
+                               }
+                       }
+               }
+               
+               /// <summary> Returns the number of buffered added documents that will
+               /// trigger a flush if enabled.
+               /// </summary>
+               /// <seealso cref="setMaxBufferedDocs">
+               /// </seealso>
+               public virtual int GetMaxBufferedDocs()
+               {
+                       EnsureOpen();
+                       return docWriter.GetMaxBufferedDocs();
+               }
+               
+               /// <summary>Determines the amount of RAM that may be used for
+               /// buffering added documents and deletions before they are
+               /// flushed to the Directory.  Generally for faster
+               /// indexing performance it's best to flush by RAM usage
+               /// instead of document count and use as large a RAM buffer
+               /// as you can.
+               /// 
+               /// <p/>When this is set, the writer will flush whenever
+               /// buffered documents and deletions use this much RAM.
+               /// Pass in {@link #DISABLE_AUTO_FLUSH} to prevent
+               /// triggering a flush due to RAM usage.  Note that if
+               /// flushing by document count is also enabled, then the
+               /// flush will be triggered by whichever comes first.<p/>
+               /// 
+               /// <p/> <b>NOTE</b>: the accounting of RAM usage for pending
+               /// deletions is only approximate.  Specifically, if you
+               /// delete by Query, Lucene currently has no way to measure
+               /// the RAM usage of individual Queries, so the accounting
+               /// will under-estimate and you should compensate by either
+               /// calling commit() periodically yourself, or by using
+               /// {@link #setMaxBufferedDeleteTerms} to flush by count
+               /// instead of RAM usage (each buffered delete Query counts
+               /// as one).
+               /// 
+               /// <p/>
+               /// <b>NOTE</b>: because IndexWriter uses <code>int</code>s when managing its
+               /// internal storage, the absolute maximum value for this setting is somewhat
+               /// less than 2048 MB. The precise limit depends on various factors, such as
+               /// how large your documents are, how many fields have norms, etc., so it's
+               /// best to set this value comfortably under 2048.
+               /// <p/>
+               /// 
+               /// <p/> The default value is {@link #DEFAULT_RAM_BUFFER_SIZE_MB}.<p/>
+               /// 
+               /// </summary>
+               /// <throws>  IllegalArgumentException if ramBufferSize is
+               /// enabled but non-positive, or it disables ramBufferSize
+               /// when maxBufferedDocs is already disabled
+               /// </throws>
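+               /// <example> A sketch that raises the flush threshold for bulk
+               /// indexing; 48.0 MB is an illustrative value comfortably under the
+               /// 2048 MB ceiling:
+               /// <code>writer.SetRAMBufferSizeMB(48.0);</code>
+               /// </example>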
+               public virtual void  SetRAMBufferSizeMB(double mb)
+               {
+                       if (mb > 2048.0)
+                       {
+                               throw new System.ArgumentException("ramBufferSize " + mb + " is too large; should be comfortably less than 2048");
+                       }
+                       if (mb != DISABLE_AUTO_FLUSH && mb <= 0.0)
+                               throw new System.ArgumentException("ramBufferSize should be > 0.0 MB when enabled");
+                       if (mb == DISABLE_AUTO_FLUSH && GetMaxBufferedDocs() == DISABLE_AUTO_FLUSH)
+                               throw new System.ArgumentException("at least one of ramBufferSize and maxBufferedDocs must be enabled");
+                       docWriter.SetRAMBufferSizeMB(mb);
+                       if (infoStream != null)
+                               Message("setRAMBufferSizeMB " + mb);
+               }
+               
+               /// <summary> Returns the value set by {@link #setRAMBufferSizeMB} if enabled.</summary>
+               public virtual double GetRAMBufferSizeMB()
+               {
+                       return docWriter.GetRAMBufferSizeMB();
+               }
+               
+               /// <summary> <p/>Determines the minimal number of delete terms required before the buffered
+               /// in-memory delete terms are applied and flushed. If there are documents
+               /// buffered in memory at the time, they are merged and a new segment is
+               /// created.<p/>
+               /// <p/>Disabled by default (writer flushes by RAM usage).<p/>
+               /// 
+               /// </summary>
+               /// <throws>  IllegalArgumentException if maxBufferedDeleteTerms
+               /// is enabled but smaller than 1
+               /// </throws>
+               /// <seealso cref="setRAMBufferSizeMB">
+               /// </seealso>
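+               /// <example> A sketch that applies deletes after every 100 buffered
+               /// delete terms (an illustrative value):
+               /// <code>writer.SetMaxBufferedDeleteTerms(100);</code>
+               /// </example>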
+               public virtual void  SetMaxBufferedDeleteTerms(int maxBufferedDeleteTerms)
+               {
+                       EnsureOpen();
+                       if (maxBufferedDeleteTerms != DISABLE_AUTO_FLUSH && maxBufferedDeleteTerms < 1)
+                               throw new System.ArgumentException("maxBufferedDeleteTerms must at least be 1 when enabled");
+                       docWriter.SetMaxBufferedDeleteTerms(maxBufferedDeleteTerms);
+                       if (infoStream != null)
+                               Message("setMaxBufferedDeleteTerms " + maxBufferedDeleteTerms);
+               }
+               
+               /// <summary> Returns the number of buffered deleted terms that will
+               /// trigger a flush if enabled.
+               /// </summary>
+               /// <seealso cref="setMaxBufferedDeleteTerms">
+               /// </seealso>
+               public virtual int GetMaxBufferedDeleteTerms()
+               {
+                       EnsureOpen();
+                       return docWriter.GetMaxBufferedDeleteTerms();
+               }
+               
+               /// <summary>Determines how often segment indices are merged by addDocument().  With
+               /// smaller values, less RAM is used while indexing, and searches on
+               /// unoptimized indices are faster, but indexing speed is slower.  With larger
+               /// values, more RAM is used during indexing, and while searches on unoptimized
+               /// indices are slower, indexing is faster.  Thus larger values (> 10) are best
+        /// for batch index creation, and smaller values (&lt; 10) for indices that are
+               /// interactively maintained.
+               /// 
+               /// <p/>Note that this method is a convenience method: it
+               /// just calls mergePolicy.setMergeFactor as long as
+               /// mergePolicy is an instance of {@link LogMergePolicy}.
+               /// Otherwise an IllegalArgumentException is thrown.<p/>
+               /// 
+               /// <p/>This must never be less than 2.  The default value is 10.
+               /// </summary>
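+               /// <example> A sketch: raise the factor while bulk loading, then
+               /// restore the default for interactive use (values are illustrative):
+               /// <code>
+               /// writer.SetMergeFactor(30); // fewer, larger merges during batch indexing
+               /// writer.SetMergeFactor(10); // back to the default for searching
+               /// </code>
+               /// </example>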
+               public virtual void  SetMergeFactor(int mergeFactor)
+               {
+                       GetLogMergePolicy().SetMergeFactor(mergeFactor);
+               }
+               
+               /// <summary> <p/>Returns the number of segments that are merged at
+               /// once and also controls the total number of segments
+               /// allowed to accumulate in the index.<p/>
+               /// 
+               /// <p/>Note that this method is a convenience method: it
+               /// just calls mergePolicy.getMergeFactor as long as
+               /// mergePolicy is an instance of {@link LogMergePolicy}.
+               /// Otherwise an IllegalArgumentException is thrown.<p/>
+               /// 
+               /// </summary>
+               /// <seealso cref="setMergeFactor">
+               /// </seealso>
+               public virtual int GetMergeFactor()
+               {
+                       return GetLogMergePolicy().GetMergeFactor();
+               }
+               
+               /// <summary> Expert: returns max delay inserted before syncing a
+               /// commit point.  On Windows, at least, pausing before
+               /// syncing can increase net indexing throughput.  The
+               /// delay is variable based on size of the segment's files,
+               /// and is only inserted when using
+               /// ConcurrentMergeScheduler for merges.
+               /// </summary>
+               /// <deprecated> This will be removed in 3.0, when
+               /// autoCommit=true is removed from IndexWriter.
+               /// </deprecated>
+        [Obsolete("This will be removed in 3.0, when autoCommit=true is removed from IndexWriter.")]
+               public virtual double GetMaxSyncPauseSeconds()
+               {
+                       return maxSyncPauseSeconds;
+               }
+               
+               /// <summary> Expert: sets the max delay before syncing a commit
+               /// point.
+               /// </summary>
+               /// <seealso cref="getMaxSyncPauseSeconds">
+               /// </seealso>
+               /// <deprecated> This will be removed in 3.0, when
+               /// autoCommit=true is removed from IndexWriter.
+               /// </deprecated>
+        [Obsolete("This will be removed in 3.0, when autoCommit=true is removed from IndexWriter.")]
+               public virtual void  SetMaxSyncPauseSeconds(double seconds)
+               {
+                       maxSyncPauseSeconds = seconds;
+               }
+               
+               /// <summary>If non-null, this will be the default infoStream used
+               /// by a newly instantiated IndexWriter.
+               /// </summary>
+               /// <seealso cref="setInfoStream">
+               /// </seealso>
+               public static void  SetDefaultInfoStream(System.IO.StreamWriter infoStream)
+               {
+                       IndexWriter.defaultInfoStream = infoStream;
+               }
+               
+               /// <summary> Returns the current default infoStream for newly
+               /// instantiated IndexWriters.
+               /// </summary>
+               /// <seealso cref="setDefaultInfoStream">
+               /// </seealso>
+               public static System.IO.StreamWriter GetDefaultInfoStream()
+               {
+                       return IndexWriter.defaultInfoStream;
+               }
+               
+               /// <summary>If non-null, information about merges, deletes and a
+               /// message when maxFieldLength is reached will be printed
+               /// to this.
+               /// </summary>
+               public virtual void  SetInfoStream(System.IO.StreamWriter infoStream)
+               {
+                       EnsureOpen();
+                       SetMessageID(infoStream);
+                       docWriter.SetInfoStream(infoStream);
+                       deleter.SetInfoStream(infoStream);
+                       if (infoStream != null)
+                               MessageState();
+               }
+               
+               private void  MessageState()
+               {
+                       Message("setInfoStream: dir=" + directory + " autoCommit=" + autoCommit + " mergePolicy=" + mergePolicy + " mergeScheduler=" + mergeScheduler + " ramBufferSizeMB=" + docWriter.GetRAMBufferSizeMB() + " maxBufferedDocs=" + docWriter.GetMaxBufferedDocs() + " maxBufferedDeleteTerms=" + docWriter.GetMaxBufferedDeleteTerms() + " maxFieldLength=" + maxFieldLength + " index=" + SegString());
+               }
+               
+               /// <summary> Returns the current infoStream in use by this writer.</summary>
+               /// <seealso cref="setInfoStream">
+               /// </seealso>
+               public virtual System.IO.StreamWriter GetInfoStream()
+               {
+                       EnsureOpen();
+                       return infoStream;
+               }
+               
+               /// <summary>Returns true if verbose output is enabled (i.e., infoStream != null). </summary>
+               public virtual bool Verbose()
+               {
+                       return infoStream != null;
+               }
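+
+               // Illustrative sketch: route diagnostic messages about merges and
+               // deletes to a log file (assumes "index.log" is writable):
+               //
+               //   writer.SetInfoStream(new System.IO.StreamWriter("index.log"));
+               //   bool verbose = writer.Verbose();   // true while an infoStream is set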
+               
+               /// <seealso cref="setDefaultWriteLockTimeout"> to change the default value for all instances of IndexWriter.
+               /// </seealso>
+               public virtual void  SetWriteLockTimeout(long writeLockTimeout)
+               {
+                       EnsureOpen();
+                       this.writeLockTimeout = writeLockTimeout;
+               }
+               
+               /// <summary> Returns allowed timeout when acquiring the write lock.</summary>
+               /// <seealso cref="setWriteLockTimeout">
+               /// </seealso>
+               public virtual long GetWriteLockTimeout()
+               {
+                       EnsureOpen();
+                       return writeLockTimeout;
+               }
+               
+               /// <summary> Sets the default (for any instance of IndexWriter) maximum time to wait for a write lock (in
+               /// milliseconds).
+               /// </summary>
+               public static void  SetDefaultWriteLockTimeout(long writeLockTimeout)
+               {
+                       IndexWriter.WRITE_LOCK_TIMEOUT = writeLockTimeout;
+               }
+               
+               /// <summary> Returns default write lock timeout for newly
+               /// instantiated IndexWriters.
+               /// </summary>
+               /// <seealso cref="setDefaultWriteLockTimeout">
+               /// </seealso>
+               public static long GetDefaultWriteLockTimeout()
+               {
+                       return IndexWriter.WRITE_LOCK_TIMEOUT;
+               }
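+
+               // Illustrative sketch: both timeouts are in milliseconds.  The static
+               // setter only affects writers created afterwards; the instance setter
+               // affects just that writer:
+               //
+               //   IndexWriter.SetDefaultWriteLockTimeout(2000);
+               //   writer.SetWriteLockTimeout(5000);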
+               
+               /// <summary> Commits all changes to an index and closes all
+               /// associated files.  Note that this may be a costly
+               /// operation, so, try to re-use a single writer instead of
+               /// closing and opening a new one.  See {@link #Commit()} for
+               /// caveats about write caching done by some IO devices.
+               /// 
+               /// <p/> If an Exception is hit during close, eg due to disk
+               /// full or some other reason, then both the on-disk index
+               /// and the internal state of the IndexWriter instance will
+               /// be consistent.  However, the close will not be complete
+               /// even though part of it (flushing buffered documents)
+               /// may have succeeded, so the write lock will still be
+               /// held.<p/>
+               /// 
+               /// <p/> If you can correct the underlying cause (eg free up
+               /// some disk space) then you can call close() again.
+               /// Failing that, if you want to force the write lock to be
+               /// released (dangerous, because you may then lose buffered
+               /// docs in the IndexWriter instance) then you can do
+               /// something like this:<p/>
+               /// 
+               /// <pre>
+               /// try {
+               ///     writer.Close();
+               /// } finally {
+               ///     if (IndexWriter.IsLocked(directory)) {
+               ///         IndexWriter.Unlock(directory);
+               ///     }
+               /// }
+               /// </pre>
+               /// 
+               /// after which, you must be certain not to use the writer
+               /// instance anymore.<p/>
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer, again.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  Close()
+               {
+                       Close(true);
+               }
+
+        /// <summary>
+        /// .NET-specific: disposes this writer by delegating to {@link #Close()}.
+        /// </summary>
+        public virtual void Dispose()
+        {
+            Close();
+        }
+               
+               /// <summary> Closes the index with or without waiting for currently
+               /// running merges to finish.  This is only meaningful when
+               /// using a MergeScheduler that runs merges in background
+               /// threads.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer, again.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// <p/><b>NOTE</b>: it is dangerous to always call
+               /// close(false), especially when IndexWriter is not open
+               /// for very long, because this can result in "merge
+               /// starvation" whereby long merges will never have a
+               /// chance to finish.  This will cause too many segments in
+               /// your index over time.<p/>
+               /// 
+               /// </summary>
+               /// <param name="waitForMerges">if true, this call will block
+               /// until all merges complete; else, it will ask all
+               /// running merges to abort, wait until those merges have
+               /// finished (which should be at most a few seconds), and
+               /// then return.
+               /// </param>
+               public virtual void  Close(bool waitForMerges)
+               {
+                       
+                       // Ensure that only one thread actually gets to do the closing:
+                       if (ShouldClose())
+                       {
+                               // If any methods have hit OutOfMemoryError, then abort
+                               // on close, in case the internal state of IndexWriter
+                               // or DocumentsWriter is corrupt
+                               if (hitOOM)
+                                       RollbackInternal();
+                               else
+                                       CloseInternal(waitForMerges);
+                       }
+               }
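+
+               // Illustrative sketch: block for merges on a long-lived writer, but
+               // abort them on a fast shutdown path (mind the merge starvation
+               // caveat above):
+               //
+               //   writer.Close(true);     // typical: wait for running merges
+               //   // writer.Close(false); // shutdown: ask merges to abort instead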
+               
+               // Returns true if this thread should attempt to close, or
+               // false if IndexWriter is now closed; else, waits until
+               // another thread finishes closing
+               private bool ShouldClose()
+               {
+                       lock (this)
+                       {
+                               while (true)
+                               {
+                                       if (!closed)
+                                       {
+                                               if (!closing)
+                                               {
+                                                       closing = true;
+                                                       return true;
+                                               }
+                                               else
+                                               {
+                                                       // Another thread is presently trying to close;
+                                                       // wait until it finishes one way (closes
+                                                       // successfully) or another (fails to close)
+                                                       DoWait();
+                                               }
+                                       }
+                                       else
+                                               return false;
+                               }
+                       }
+               }
+               
+               private void  CloseInternal(bool waitForMerges)
+               {
+                       
+                       docWriter.PauseAllThreads();
+                       
+                       try
+                       {
+                               if (infoStream != null)
+                                       Message("now flush at close");
+                               
+                               docWriter.Close();
+                               
+                               // Only allow a new merge to be triggered if we are
+                               // going to wait for merges:
+                               if (!hitOOM)
+                               {
+                                       Flush(waitForMerges, true, true);
+                               }
+                               
+                               if (waitForMerges)
+                               // Give merge scheduler last chance to run, in case
+                               // any pending merges are waiting:
+                                       mergeScheduler.Merge(this);
+                               
+                               mergePolicy.Close();
+                               
+                               FinishMerges(waitForMerges);
+                               stopMerges = true;
+                               
+                               mergeScheduler.Close();
+                               
+                               if (infoStream != null)
+                                       Message("now call final commit()");
+                               
+                               if (!hitOOM)
+                               {
+                                       Commit(0);
+                               }
+                               
+                               if (infoStream != null)
+                                       Message("at close: " + SegString());
+                               
+                               lock (this)
+                               {
+                                       readerPool.Close();
+                                       docWriter = null;
+                                       deleter.Close();
+                               }
+                               
+                               if (closeDir)
+                                       directory.Close();
+                               
+                               if (writeLock != null)
+                               {
+                                       writeLock.Release(); // release write lock
+                                       writeLock = null;
+                               }
+                               lock (this)
+                               {
+                                       closed = true;
+                               }
+                       }
+                       catch (System.OutOfMemoryException oom)
+                       {
+                               HandleOOM(oom, "closeInternal");
+                       }
+                       finally
+                       {
+                               lock (this)
+                               {
+                                       closing = false;
+                                       System.Threading.Monitor.PulseAll(this);
+                                       if (!closed)
+                                       {
+                                               if (docWriter != null)
+                                                       docWriter.ResumeAllThreads();
+                                               if (infoStream != null)
+                                                       Message("hit exception while closing");
+                                       }
+                               }
+                       }
+               }
+               
+               /// <summary>Tells the docWriter to close its currently open shared
+               /// doc stores (stored fields &amp; vectors files).
+               /// The return value specifies whether new doc store files are compound or not.
+               /// </summary>
+               /// </summary>
+               private bool FlushDocStores()
+               {
+                       lock (this)
+                       {
+                if (infoStream != null)
+                {
+                    Message("flushDocStores segment=" + docWriter.GetDocStoreSegment());
+                }
+
+                               bool useCompoundDocStore = false;
+                if (infoStream != null)
+                {
+                    Message("closeDocStores segment=" + docWriter.GetDocStoreSegment());
+                }
+
+                               System.String docStoreSegment;
+                               
+                               bool success = false;
+                               try
+                               {
+                                       docStoreSegment = docWriter.CloseDocStore();
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       if (!success && infoStream != null)
+                                       {
+                                               Message("hit exception closing doc store segment");
+                                       }
+                               }
+
+                if (infoStream != null)
+                {
+                    Message("flushDocStores files=" + docWriter.ClosedFiles());
+                }
+
+                               useCompoundDocStore = mergePolicy.UseCompoundDocStore(segmentInfos);
+                               
+                               if (useCompoundDocStore && docStoreSegment != null && docWriter.ClosedFiles().Count != 0)
+                               {
+                                       // Now build compound doc store file
+                                       
+                                       if (infoStream != null)
+                                       {
+                                               Message("create compound file " + docStoreSegment + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION);
+                                       }
+                                       
+                                       success = false;
+                                       
+                                       int numSegments = segmentInfos.Count;
+                                       System.String compoundFileName = docStoreSegment + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION;
+                                       
+                                       try
+                                       {
+                                               CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, compoundFileName);
+                                               System.Collections.IEnumerator it = docWriter.ClosedFiles().GetEnumerator();
+                                               while (it.MoveNext())
+                                               {
+                                                       cfsWriter.AddFile((System.String) it.Current);
+                                               }
+                                               
+                                               // Perform the merge
+                                               cfsWriter.Close();
+                                               success = true;
+                                       }
+                                       finally
+                                       {
+                                               if (!success)
+                                               {
+                                                       if (infoStream != null)
+                                                               Message("hit exception building compound file doc store for segment " + docStoreSegment);
+                                                       deleter.DeleteFile(compoundFileName);
+                                               }
+                                       }
+                                       
+                                       for (int i = 0; i < numSegments; i++)
+                                       {
+                                               SegmentInfo si = segmentInfos.Info(i);
+                                               if (si.GetDocStoreOffset() != -1 && si.GetDocStoreSegment().Equals(docStoreSegment))
+                                                       si.SetDocStoreIsCompoundFile(true);
+                                       }
+                                       
+                                       Checkpoint();
+                                       
+                                       // In case the files we just merged into a CFS were
+                                       // not previously checkpointed:
+                                       deleter.DeleteNewFiles(docWriter.ClosedFiles());
+                               }
+                               
+                               return useCompoundDocStore;
+                       }
+               }
+               
+               /// <summary>Returns the Directory used by this index. </summary>
+               public virtual Directory GetDirectory()
+               {
+                       // Pass false because the flush during closing calls getDirectory
+                       EnsureOpen(false);
+                       return directory;
+               }
+               
+               /// <summary>Returns the analyzer used by this index. </summary>
+               public virtual Analyzer GetAnalyzer()
+               {
+                       EnsureOpen();
+                       return analyzer;
+               }
+               
+               /// <summary>Returns the number of documents currently in this
+               /// index, not counting deletions.
+               /// </summary>
+               /// <deprecated> Please use {@link #MaxDoc()} (same as this
+               /// method) or {@link #NumDocs()} (also takes deletions
+               /// into account), instead. 
+               /// </deprecated>
+        [Obsolete("Please use MaxDoc() (same as this method) or NumDocs() (also takes deletions into account), instead. ")]
+               public virtual int DocCount()
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               return MaxDoc();
+                       }
+               }
+               
+               /// <summary>Returns total number of docs in this index, including
+               /// docs not yet flushed (still in the RAM buffer),
+               /// not counting deletions.
+               /// </summary>
+               /// <seealso cref="numDocs">
+               /// </seealso>
+               public virtual int MaxDoc()
+               {
+                       lock (this)
+                       {
+                               int count;
+                               if (docWriter != null)
+                                       count = docWriter.GetNumDocsInRAM();
+                               else
+                                       count = 0;
+                               
+                               for (int i = 0; i < segmentInfos.Count; i++)
+                                       count += segmentInfos.Info(i).docCount;
+                               return count;
+                       }
+               }
+               
+               /// <summary>Returns total number of docs in this index, including
+               /// docs not yet flushed (still in the RAM buffer), and
+               /// including deletions.  <b>NOTE:</b> buffered deletions
+               /// are not counted.  If you really need these to be
+               /// counted you should call {@link #Commit()} first.
+               /// </summary>
+               /// <seealso cref="numDocs">
+               /// </seealso>
+               public virtual int NumDocs()
+               {
+                       lock (this)
+                       {
+                               int count;
+                               if (docWriter != null)
+                                       count = docWriter.GetNumDocsInRAM();
+                               else
+                                       count = 0;
+                               
+                               for (int i = 0; i < segmentInfos.Count; i++)
+                               {
+                                       SegmentInfo info = segmentInfos.Info(i);
+                                       count += info.docCount - info.GetDelCount();
+                               }
+                               return count;
+                       }
+               }
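+
+               // Illustrative sketch of the two counts (buffered deletions are not
+               // reflected until they are flushed):
+               //
+               //   int total = writer.MaxDoc();   // deleted docs still counted
+               //   int live  = writer.NumDocs();  // per-segment deletions subtracted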
+               
+               public virtual bool HasDeletions()
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               if (docWriter.HasDeletes())
+                                       return true;
+                               for (int i = 0; i < segmentInfos.Count; i++)
+                                       if (segmentInfos.Info(i).HasDeletions())
+                                               return true;
+                               return false;
+                       }
+               }
+               
+               /// <summary> The maximum number of terms that will be indexed for a single field in a
+               /// document.  This limits the amount of memory required for indexing, so that
+               /// collections with very large files will not crash the indexing process by
+               /// running out of memory.<p/>
+               /// Note that this effectively truncates large documents, excluding from the
+               /// index terms that occur further in the document.  If you know your source
+               /// documents are large, be sure to set this value high enough to accommodate
+               /// the expected size.  If you set it to int.MaxValue, then the only limit
+               /// is your memory, but you should anticipate an OutOfMemoryError.<p/>
+               /// By default, no more than 10,000 terms will be indexed for a field.
+               /// 
+               /// </summary>
+               /// <seealso cref="MaxFieldLength">
+               /// </seealso>
+               private int maxFieldLength;
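+
+               // Illustrative sketch: raising the per-field term limit for large
+               // documents (SetMaxFieldLength is defined elsewhere in this class):
+               //
+               //   writer.SetMaxFieldLength(100000);        // up to 100,000 terms per field
+               //   writer.SetMaxFieldLength(int.MaxValue);  // effectively unlimited; watch RAM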
+               
+               /// <summary> Adds a document to this index.  If the document contains more than
+               /// {@link #SetMaxFieldLength(int)} terms for a given field, the remainder are
+               /// discarded.
+               /// 
+               /// <p/> Note that if an Exception is hit (for example disk full)
+               /// then the index will be consistent, but this document
+               /// may not have been added.  Furthermore, it's possible
+               /// the index will have one segment in non-compound format
+               /// even when using compound files (when a merge has
+               /// partially succeeded).<p/>
+               /// 
+               /// <p/> This method periodically flushes pending documents
+               /// to the Directory (see <a href="#flush">above</a>), and
+               /// also periodically triggers segment merges in the index
+               /// according to the {@link MergePolicy} in use.<p/>
+               /// 
+               /// <p/>Merges temporarily consume space in the
+               /// directory. The amount of space required is up to 1X the
+               /// size of all segments being merged, when no
+               /// readers/searchers are open against the index, and up to
+               /// 2X the size of all segments being merged when
+               /// readers/searchers are open against the index (see
+               /// {@link #Optimize()} for details). The sequence of
+               /// primitive merge operations performed is governed by the
+               /// merge policy.
+               /// 
+               /// <p/>Note that each term in the document can be no longer
+               /// than 16383 characters, otherwise an
+               /// IllegalArgumentException will be thrown.<p/>
+               /// 
+               /// <p/>Note that it's possible to create an invalid Unicode
+               /// string if a UTF-16 surrogate pair is malformed.
+               /// In this case, the invalid characters are silently
+               /// replaced with the Unicode replacement character
+               /// U+FFFD.<p/>
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  AddDocument(Document doc)
+               {
+                       AddDocument(doc, analyzer);
+               }
+               
+               /// <summary> Adds a document to this index, using the provided analyzer instead of the
+               /// value of {@link #GetAnalyzer()}.  If the document contains more than
+               /// {@link #SetMaxFieldLength(int)} terms for a given field, the remainder are
+               /// discarded.
+               /// 
+               /// <p/>See {@link #AddDocument(Document)} for details on
+               /// index and IndexWriter state after an Exception, and
+               /// flushing/merging temporary free space requirements.<p/>
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  AddDocument(Document doc, Analyzer analyzer)
+               {
+                       EnsureOpen();
+                       bool doFlush = false;
+                       bool success = false;
+                       try
+                       {
+                               try
+                               {
+                                       doFlush = docWriter.AddDocument(doc, analyzer);
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       if (!success)
+                                       {
+                                               
+                                               if (infoStream != null)
+                                                       Message("hit exception adding document");
+                                               
+                                               lock (this)
+                                               {
+                                                       // If docWriter has some aborted files that were
+                                                       // never incref'd, then we clean them up here
+                                                       if (docWriter != null)
+                                                       {
+                                System.Collections.Generic.ICollection<string> files = docWriter.AbortedFiles();
+                                                               if (files != null)
+                                                                       deleter.DeleteNewFiles(files);
+                                                       }
+                                               }
+                                       }
+                               }
+                               if (doFlush)
+                                       Flush(true, false, false);
+                       }
+                       catch (System.OutOfMemoryException oom)
+                       {
+                               HandleOOM(oom, "addDocument");
+                       }
+               }
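+
+               // Illustrative sketch (assumes Lucene.Net.Documents.Document/Field and
+               // a string `text`; Field flag names may vary by version):
+               //
+               //   Document doc = new Document();
+               //   doc.Add(new Field("id", "42", Field.Store.YES, Field.Index.NOT_ANALYZED));
+               //   doc.Add(new Field("body", text, Field.Store.NO, Field.Index.ANALYZED));
+               //   writer.AddDocument(doc);   // one-arg form uses the writer's analyzer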
+               
+               /// <summary> Deletes the document(s) containing <code>term</code>.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <param name="term">the term to identify the documents to be deleted
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  DeleteDocuments(Term term)
+               {
+                       EnsureOpen();
+                       try
+                       {
+                               bool doFlush = docWriter.BufferDeleteTerm(term);
+                               if (doFlush)
+                                       Flush(true, false, false);
+                       }
+                       catch (System.OutOfMemoryException oom)
+                       {
+                               HandleOOM(oom, "deleteDocuments(Term)");
+                       }
+               }
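+
+               // Illustrative sketch: delete by exact term; the delete is buffered
+               // until the next flush/commit:
+               //
+               //   writer.DeleteDocuments(new Term("id", "42"));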
+               
+               /// <summary> Deletes the document(s) containing any of the
+               /// terms. All deletes are flushed at the same time.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <param name="terms">array of terms to identify the documents
+               /// to be deleted
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  DeleteDocuments(Term[] terms)
+               {
+                       EnsureOpen();
+                       try
+                       {
+                               bool doFlush = docWriter.BufferDeleteTerms(terms);
+                               if (doFlush)
+                                       Flush(true, false, false);
+                       }
+                       catch (System.OutOfMemoryException oom)
+                       {
+                               HandleOOM(oom, "deleteDocuments(Term[])");
+                       }
+               }
+               
+               /// <summary> Deletes the document(s) matching the provided query.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <param name="query">the query to identify the documents to be deleted
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  DeleteDocuments(Query query)
+               {
+                       EnsureOpen();
+                       bool doFlush = docWriter.BufferDeleteQuery(query);
+                       if (doFlush)
+                               Flush(true, false, false);
+               }
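+
+               // Illustrative sketch: delete everything a query matches (assumes
+               // Lucene.Net.Search.TermQuery):
+               //
+               //   writer.DeleteDocuments(new TermQuery(new Term("state", "stale")));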
+               
+               /// <summary> Deletes the document(s) matching any of the provided queries.
+               /// All deletes are flushed at the same time.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <param name="queries">array of queries to identify the documents
+               /// to be deleted
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  DeleteDocuments(Query[] queries)
+               {
+                       EnsureOpen();
+                       bool doFlush = docWriter.BufferDeleteQueries(queries);
+                       if (doFlush)
+                               Flush(true, false, false);
+               }
+               
+               /// <summary> Updates a document by first deleting the document(s)
+               /// containing <code>term</code> and then adding the new
+               /// document.  The delete and then add are atomic as seen
+               /// by a reader on the same index (flush may happen only after
+               /// the add).
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <param name="term">the term to identify the document(s) to be
+               /// deleted
+               /// </param>
+               /// <param name="doc">the document to be added
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  UpdateDocument(Term term, Document doc)
+               {
+                       EnsureOpen();
+                       UpdateDocument(term, doc, GetAnalyzer());
+               }
+               
+               /// <summary> Updates a document by first deleting the document(s)
+               /// containing <code>term</code> and then adding the new
+               /// document.  The delete and then add are atomic as seen
+               /// by a reader on the same index (flush may happen only after
+               /// the add).
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <param name="term">the term to identify the document(s) to be
+               /// deleted
+               /// </param>
+               /// <param name="doc">the document to be added
+               /// </param>
+               /// <param name="analyzer">the analyzer to use when analyzing the document
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  UpdateDocument(Term term, Document doc, Analyzer analyzer)
+               {
+                       EnsureOpen();
+                       try
+                       {
+                               bool doFlush = false;
+                               bool success = false;
+                               try
+                               {
+                                       doFlush = docWriter.UpdateDocument(term, doc, analyzer);
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       if (!success)
+                                       {
+                                               
+                                               if (infoStream != null)
+                                                       Message("hit exception updating document");
+                                               
+                                               lock (this)
+                                               {
+                                                       // If docWriter has some aborted files that were
+                                                       // never incref'd, then we clean them up here
+                            System.Collections.Generic.ICollection<string> files = docWriter.AbortedFiles();
+                                                       if (files != null)
+                                                               deleter.DeleteNewFiles(files);
+                                               }
+                                       }
+                               }
+                               if (doFlush)
+                                       Flush(true, false, false);
+                       }
+                       catch (System.OutOfMemoryException oom)
+                       {
+                               HandleOOM(oom, "updateDocument");
+                       }
+               }
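+
+               // Illustrative sketch: atomically replace the document whose "id"
+               // term is "42" (assumes `newDoc` is a prepared Document):
+               //
+               //   writer.UpdateDocument(new Term("id", "42"), newDoc);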
+               
+               // for test purpose
+               public /*internal*/ int GetSegmentCount()
+               {
+                       lock (this)
+                       {
+                               return segmentInfos.Count;
+                       }
+               }
+               
+               // for test purpose
+               public /*internal*/ int GetNumBufferedDocuments()
+               {
+                       lock (this)
+                       {
+                               return docWriter.GetNumDocsInRAM();
+                       }
+               }
+               
+               // for test purpose
+               public /*internal*/ int GetDocCount(int i)
+               {
+                       lock (this)
+                       {
+                               if (i >= 0 && i < segmentInfos.Count)
+                               {
+                                       return segmentInfos.Info(i).docCount;
+                               }
+                               else
+                               {
+                                       return -1;
+                               }
+                       }
+               }
+               
+               // for test purpose
+               public /*internal*/ int GetFlushCount()
+               {
+                       lock (this)
+                       {
+                               return flushCount;
+                       }
+               }
+               
+               // for test purpose
+               public /*internal*/ int GetFlushDeletesCount()
+               {
+                       lock (this)
+                       {
+                               return flushDeletesCount;
+                       }
+               }
+               
+               internal System.String NewSegmentName()
+               {
+                       // Cannot synchronize on IndexWriter because that causes
+                       // deadlock
+                       lock (segmentInfos)
+                       {
+                               // Important to increment changeCount so that the
+                               // segmentInfos is written on close.  Otherwise we
+                               // could close, re-open and re-return the same segment
+                               // name that was previously returned which can cause
+                               // problems at least with ConcurrentMergeScheduler.
+                               changeCount++;
+                               return "_" + SupportClass.Number.ToString(segmentInfos.counter++);
+                       }
+               }
+               
+               /// <summary>If non-null, information about merges will be printed to this.</summary>
+               private System.IO.StreamWriter infoStream = null;
+               private static System.IO.StreamWriter defaultInfoStream = null;
+               
+               /// <summary> Requests an "optimize" operation on an index, priming the index
+               /// for the fastest available search. Traditionally this has meant
+               /// merging all segments into a single segment as is done in the
+               /// default merge policy, but individual merge policies may implement
+               /// optimize in different ways.
+               /// 
+               /// <p/>It is recommended that this method be called upon completion of indexing.  In
+               /// environments with frequent updates, optimize is best done during low volume times, if at all. 
+               /// 
+               /// <p/>See http://www.gossamer-threads.com/lists/lucene/java-dev/47895 for more discussion.<p/>
+               /// 
+               /// <p/>Note that optimize requires 2X the index size free
+               /// space in your Directory (3X if you're using compound
+               /// file format).  For example, if your index
+               /// size is 10 MB then you need 20 MB free for optimize to
+               /// complete (30 MB if you're using compound file format).<p/>
+               /// 
+               /// <p/>If some but not all readers re-open while an
+               /// optimize is underway, this will cause > 2X temporary
+               /// space to be consumed as those new readers will then
+               /// hold open the partially optimized segments at that
+               /// time.  It is best not to re-open readers while optimize
+               /// is running.<p/>
+               /// 
+               /// <p/>The actual temporary usage could be much less than
+               /// these figures (it depends on many factors).<p/>
+               /// 
+               /// <p/>In general, once the optimize completes, the total size of the
+               /// index will be less than the size of the starting index.
+               /// It could be quite a bit smaller (if there were many
+               /// pending deletes) or just slightly smaller.<p/>
+               /// 
+               /// <p/>If an Exception is hit during optimize(), for example
+               /// due to disk full, the index will not be corrupt and no
+               /// documents will have been lost.  However, it may have
+               /// been partially optimized (some segments were merged but
+               /// not all), and it's possible that one of the segments in
+               /// the index will be in non-compound format even when
+               /// using compound file format.  This will occur when the
+               /// Exception is hit during conversion of the segment into
+               /// compound format.<p/>
+               /// 
+               /// <p/>This call will optimize those segments present in
+               /// the index when the call started.  If other threads are
+               /// still adding documents and flushing segments, those
+               /// newly created segments will not be optimized unless you
+               /// call optimize again.<p/>
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <seealso cref="LogMergePolicy.findMergesForOptimize">
+               /// </seealso>
+               public virtual void  Optimize()
+               {
+                       Optimize(true);
+               }
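+
+               // Illustrative sketch: optimize once after a bulk load, then close
+               // (assumes the free disk space described above is available):
+               //
+               //   writer.Optimize();   // merge down to a single segment
+               //   writer.Close();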
+
+        /// <summary> Optimize the index down to &lt;= maxNumSegments.  If
+               /// maxNumSegments==1 then this is the same as {@link
+               /// #Optimize()}.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <param name="maxNumSegments">maximum number of segments left
+               /// in the index after optimization finishes
+               /// </param>
+               public virtual void  Optimize(int maxNumSegments)
+               {
+                       Optimize(maxNumSegments, true);
+               }
+               
+               /// <summary>Just like {@link #Optimize()}, except you can specify
+               /// whether the call should block until the optimize
+               /// completes.  This is only meaningful with a
+               /// {@link MergeScheduler} that is able to run merges in
+               /// background threads.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// </summary>
+               public virtual void  Optimize(bool doWait)
+               {
+                       Optimize(1, doWait);
+               }
+               
+               /// <summary>Just like {@link #Optimize(int)}, except you can
+               /// specify whether the call should block until the
+               /// optimize completes.  This is only meaningful with a
+               /// {@link MergeScheduler} that is able to run merges in
+               /// background threads.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// </summary>
+               public virtual void  Optimize(int maxNumSegments, bool doWait)
+               {
+                       EnsureOpen();
+                       
+                       if (maxNumSegments < 1)
+                               throw new System.ArgumentException("maxNumSegments must be >= 1; got " + maxNumSegments);
+                       
+                       if (infoStream != null)
+                               Message("optimize: index now " + SegString());
+                       
+                       Flush(true, false, true);
+                       
+                       lock (this)
+                       {
+                               ResetMergeExceptions();
+                               segmentsToOptimize = new System.Collections.Hashtable();
+                optimizeMaxNumSegments = maxNumSegments;
+                               int numSegments = segmentInfos.Count;
+                               for (int i = 0; i < numSegments; i++)
+                                       SupportClass.CollectionsHelper.AddIfNotContains(segmentsToOptimize, segmentInfos.Info(i));
+                               
+                               // Now mark all pending & running merges as optimize
+                               // merge:
+                               System.Collections.IEnumerator it = pendingMerges.GetEnumerator();
+                               while (it.MoveNext())
+                               {
+                                       MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.Current;
+                                       merge.optimize = true;
+                                       merge.maxNumSegmentsOptimize = maxNumSegments;
+                               }
+                               
+                               it = runningMerges.GetEnumerator();
+                               while (it.MoveNext())
+                               {
+                                       MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.Current;
+                                       merge.optimize = true;
+                                       merge.maxNumSegmentsOptimize = maxNumSegments;
+                               }
+                       }
+                       
+                       MaybeMerge(maxNumSegments, true);
+                       
+                       if (doWait)
+                       {
+                               lock (this)
+                               {
+                                       while (true)
+                                       {
+                                               
+                                               if (hitOOM)
+                                               {
+                                                       throw new System.SystemException("this writer hit an OutOfMemoryError; cannot complete optimize");
+                                               }
+                                               
+                                               if (mergeExceptions.Count > 0)
+                                               {
+                                                       // Forward any exceptions in background merge
+                                                       // threads to the current thread:
+                                                       int size = mergeExceptions.Count;
+                                                       for (int i = 0; i < size; i++)
+                                                       {
+                                                               MergePolicy.OneMerge merge = (MergePolicy.OneMerge) mergeExceptions[0];
+                                                               if (merge.optimize)
+                                                               {
+                                    System.IO.IOException err;
+                                                                       System.Exception t = merge.GetException();
+                                    if (t != null)
+                                                                           err = new System.IO.IOException("background merge hit exception: " + merge.SegString(directory), t);
+                                    else
+                                        err = new System.IO.IOException("background merge hit exception: " + merge.SegString(directory));
+                                                                       throw err;
+                                                               }
+                                                       }
+                                               }
+                                               
+                                               if (OptimizeMergesPending())
+                                                       DoWait();
+                                               else
+                                                       break;
+                                       }
+                               }
+                               
+                               // If close is called while we are still
+                               // running, throw an exception so the calling
+                               // thread will know the optimize did not
+                               // complete
+                               EnsureOpen();
+                       }
+                       
+                       // NOTE: in the ConcurrentMergeScheduler case, when
+                       // doWait is false, we can return immediately while
+                       // background threads accomplish the optimization
+               }
+               
+               /// <summary>Returns true if any merges in pendingMerges or
+               /// runningMerges are optimization merges. 
+               /// </summary>
+               private bool OptimizeMergesPending()
+               {
+                       lock (this)
+                       {
+                System.Collections.Generic.LinkedList<MergePolicy.OneMerge>.Enumerator it =  pendingMerges.GetEnumerator();
+                while (it.MoveNext())
+                {
+                    if (it.Current.optimize) return true;
+                }
+
+                System.Collections.Generic.List<MergePolicy.OneMerge>.Enumerator it2 = runningMerges.GetEnumerator();
+                while (it2.MoveNext())
+                {
+                    if (it2.Current.optimize) return true;
+                }
+                               
+                               return false;
+                       }
+               }
+               
+               /// <summary>Just like {@link #ExpungeDeletes()}, except you can
+               /// specify whether the call should block until the
+               /// operation completes.  This is only meaningful with a
+               /// {@link MergeScheduler} that is able to run merges in
+               /// background threads.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// </summary>
+               public virtual void  ExpungeDeletes(bool doWait)
+               {
+                       EnsureOpen();
+                       
+                       if (infoStream != null)
+                               Message("expungeDeletes: index now " + SegString());
+                       
+                       MergePolicy.MergeSpecification spec;
+                       
+                       lock (this)
+                       {
+                               spec = mergePolicy.FindMergesToExpungeDeletes(segmentInfos);
+                               if (spec != null)
+                               {
+                                       int numMerges = spec.merges.Count;
+                                       for (int i = 0; i < numMerges; i++)
+                                               RegisterMerge((MergePolicy.OneMerge) spec.merges[i]);
+                               }
+                       }
+                       
+                       mergeScheduler.Merge(this);
+                       
+                       if (spec != null && doWait)
+                       {
+                               int numMerges = spec.merges.Count;
+                               lock (this)
+                               {
+                                       bool running = true;
+                                       while (running)
+                                       {
+                                               
+                                               if (hitOOM)
+                                               {
+                                                       throw new System.SystemException("this writer hit an OutOfMemoryError; cannot complete expungeDeletes");
+                                               }
+                                               
+                                               // Check each merge that MergePolicy asked us to
+                                               // do, to see if any of them are still running and
+                                               // if any of them have hit an exception.
+                                               running = false;
+                                               for (int i = 0; i < numMerges; i++)
+                                               {
+                                                       MergePolicy.OneMerge merge = (MergePolicy.OneMerge) spec.merges[i];
+                                                       if (pendingMerges.Contains(merge) || runningMerges.Contains(merge))
+                                                               running = true;
+                                                       System.Exception t = merge.GetException();
+                                                       if (t != null)
+                                                       {
+                                                               System.IO.IOException ioe = new System.IO.IOException("background merge hit exception: " + merge.SegString(directory), t);
+                                                               throw ioe;
+                                                       }
+                                               }
+                                               
+                                               // If any of our merges are still running, wait:
+                                               if (running)
+                                                       DoWait();
+                                       }
+                               }
+                       }
+                       
+                       // NOTE: in the ConcurrentMergeScheduler case, when
+                       // doWait is false, we can return immediately while
+                       // background threads accomplish the merging
+               }
+               
+               
+               /// <summary>Expunges all deletes from the index.  When an index
+               /// has many document deletions (or updates to existing
+               /// documents), it's best to either call optimize or
+               /// expungeDeletes to remove all unused data in the index
+               /// associated with the deleted documents; this saves disk
+               /// space and memory usage while searching.  To see how
+               /// many deletions are pending in your index, call
+               /// {@link IndexReader#numDeletedDocs}.
+               /// expungeDeletes should be somewhat faster
+               /// than optimize since it does not insist on reducing the
+               /// index to a single segment (though this depends on the
+               /// {@link MergePolicy}; see {@link
+               /// MergePolicy#findMergesToExpungeDeletes}).  Note that
+               /// this call does not first commit any buffered
+               /// documents, so you must do so yourself if necessary.
+               /// See also {@link #ExpungeDeletes(boolean)}.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// </summary>
+               public virtual void  ExpungeDeletes()
+               {
+                       ExpungeDeletes(true);
+               }
+               
+               /// <summary> Expert: asks the mergePolicy whether any merges are
+               /// necessary now and, if so, runs the requested merges and
+               /// then iterates (testing again whether merges are needed) until
+               /// no more merges are returned by the mergePolicy.
+               /// 
+               /// Explicit calls to maybeMerge() are usually not
+               /// necessary. The most common case is when merge policy
+               /// parameters have changed.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// </summary>
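+               /// <example>
+               /// A minimal sketch (illustrative; <c>writer</c> is an already-open
+               /// IndexWriter using the default LogMergePolicy-based setup):
+               /// <code>
+               /// writer.SetMergeFactor(5); // merge policy parameters changed...
+               /// writer.MaybeMerge();      // ...so ask for any now-necessary merges
+               /// </code>
+               /// </example>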
+               public void  MaybeMerge()
+               {
+                       MaybeMerge(false);
+               }
+               
+               private void  MaybeMerge(bool optimize)
+               {
+                       MaybeMerge(1, optimize);
+               }
+               
+               private void  MaybeMerge(int maxNumSegmentsOptimize, bool optimize)
+               {
+                       UpdatePendingMerges(maxNumSegmentsOptimize, optimize);
+                       mergeScheduler.Merge(this);
+               }
+               
+               private void  UpdatePendingMerges(int maxNumSegmentsOptimize, bool optimize)
+               {
+                       lock (this)
+                       {
+                               System.Diagnostics.Debug.Assert(!optimize || maxNumSegmentsOptimize > 0);
+
+                               if (stopMerges)
+                                       return;
+                               
+                               // Do not start new merges if we've hit OOME
+                               if (hitOOM)
+                                       return;
+                               
+                               MergePolicy.MergeSpecification spec;
+                               if (optimize)
+                               {
+                                       spec = mergePolicy.FindMergesForOptimize(segmentInfos, maxNumSegmentsOptimize, segmentsToOptimize);
+                                       
+                                       if (spec != null)
+                                       {
+                                               int numMerges = spec.merges.Count;
+                                               for (int i = 0; i < numMerges; i++)
+                                               {
+                                                       MergePolicy.OneMerge merge = ((MergePolicy.OneMerge) spec.merges[i]);
+                                                       merge.optimize = true;
+                                                       merge.maxNumSegmentsOptimize = maxNumSegmentsOptimize;
+                                               }
+                                       }
+                               }
+                               else
+                                       spec = mergePolicy.FindMerges(segmentInfos);
+                               
+                               if (spec != null)
+                               {
+                                       int numMerges = spec.merges.Count;
+                                       for (int i = 0; i < numMerges; i++)
+                                               RegisterMerge((MergePolicy.OneMerge) spec.merges[i]);
+                               }
+                       }
+               }
+
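+        /// <summary>Test-only hook exposing the internal {@link #GetNextMerge()} to the NUnit tests.</summary>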
+        public virtual MergePolicy.OneMerge GetNextMerge_forNUnit()
+        {
+            return GetNextMerge();
+        }
+               
+               /// <summary>Expert: the {@link MergeScheduler} calls this method
+               /// to retrieve the next merge requested by the
+               /// MergePolicy 
+               /// </summary>
+               internal virtual MergePolicy.OneMerge GetNextMerge()
+               {
+                       lock (this)
+                       {
+                               if (pendingMerges.Count == 0)
+                                       return null;
+                               else
+                               {
+                                       // Advance the merge from pending to running
+                                       MergePolicy.OneMerge merge = pendingMerges.First.Value;
+                                       pendingMerges.RemoveFirst();
+                                       runningMerges.Add(merge);
+                                       return merge;
+                               }
+                       }
+               }
+               
+               /// <summary>Like getNextMerge() except only returns a merge if it's
+               /// external. 
+               /// </summary>
+               private MergePolicy.OneMerge GetNextExternalMerge()
+               {
+                       lock (this)
+                       {
+                               if (pendingMerges.Count == 0)
+                                       return null;
+                               else
+                               {
+                                       foreach (MergePolicy.OneMerge merge in pendingMerges)
+                                       {
+                                               if (merge.isExternal)
+                                               {
+                                                       // Advance the merge from pending to running
+                                                       pendingMerges.Remove(merge);    // {{Aroush-2.9}} From Mike Garski: this is an O(n) op... is that an issue?
+                                                       runningMerges.Add(merge);
+                                                       return merge;
+                                               }
+                                       }
+                                       
+                                       // All existing merges do not involve external segments
+                                       return null;
+                               }
+                       }
+               }
+               
+               /*
+               * Begin a transaction.  During a transaction, any segment
+               * merges that happen (or RAM segments being flushed) will not
+               * write a new segments file and will not remove any files
+               * that were present at the start of the transaction.  You
+               * must make a matched (try/finally) call to
+               * commitTransaction() or rollbackTransaction() to finish
+               * the transaction.
+               *
+               * Note that buffered documents and delete terms are not handled
+               * within the transactions, so they must be flushed before the
+               * transaction is started.
+               */
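+               /*
+               * Illustrative usage pattern (mirrors the addIndexes methods below):
+               *
+               *   StartTransaction(false);
+               *   bool success = false;
+               *   try
+               *   {
+               *       // ... merge or copy segments ...
+               *       success = true;
+               *   }
+               *   finally
+               *   {
+               *       if (success)
+               *           CommitTransaction();
+               *       else
+               *           RollbackTransaction();
+               *   }
+               */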
+               private void  StartTransaction(bool haveReadLock)
+               {
+                       lock (this)
+                       {
+                               
+                               bool success = false;
+                               try
+                               {
+                                       if (infoStream != null)
+                                               Message("now start transaction");
+                                       
+                                       System.Diagnostics.Debug.Assert(docWriter.GetNumBufferedDeleteTerms() == 0 , 
+                                               "calling startTransaction with buffered delete terms not supported: numBufferedDeleteTerms=" + docWriter.GetNumBufferedDeleteTerms());
+                                       System.Diagnostics.Debug.Assert(docWriter.GetNumDocsInRAM() == 0 , 
+                                               "calling startTransaction with buffered documents not supported: numDocsInRAM=" + docWriter.GetNumDocsInRAM());
+                                       
+                                       EnsureOpen();
+                                       
+                                       // If a transaction is trying to roll back (because
+                                       // addIndexes hit an exception) then wait here until
+                                       // that's done:
+                                       lock (this)
+                                       {
+                                               while (stopMerges)
+                                                       DoWait();
+                                       }
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       // Release the read lock if our caller held it, on
+                                       // hitting an exception
+                                       if (!success && haveReadLock)
+                                               ReleaseRead();
+                               }
+                               
+                               if (haveReadLock)
+                               {
+                                       UpgradeReadToWrite();
+                               }
+                               else
+                               {
+                                       AcquireWrite();
+                               }
+                               
+                               success = false;
+                               try
+                               {
+                                       localRollbackSegmentInfos = (SegmentInfos) segmentInfos.Clone();
+                                       
+                                       System.Diagnostics.Debug.Assert(!HasExternalSegments());
+                                       
+                                       localAutoCommit = autoCommit;
+                                       localFlushedDocCount = docWriter.GetFlushedDocCount();
+                                       
+                                       if (localAutoCommit)
+                                       {
+                                               
+                                               if (infoStream != null)
+                                                       Message("flush at startTransaction");
+                                               
+                                               Flush(true, false, false);
+                                               
+                                               // Turn off auto-commit during our local transaction:
+                                               autoCommit = false;
+                                       }
+                                       // We must "protect" our files at this point from
+                                       // deletion in case we need to rollback:
+                                       else
+                                               deleter.IncRef(segmentInfos, false);
+                                       
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       if (!success)
+                                               FinishAddIndexes();
+                               }
+                       }
+               }
+               
+               /*
+               * Rolls back the transaction and restores state to where
+               * we were at the start.
+               */
+               private void  RollbackTransaction()
+               {
+                       lock (this)
+                       {
+                               
+                               if (infoStream != null)
+                                       Message("now rollback transaction");
+                               
+                               // First restore autoCommit in case we hit an exception below:
+                               autoCommit = localAutoCommit;
+                               if (docWriter != null)
+                               {
+                                       docWriter.SetFlushedDocCount(localFlushedDocCount);
+                               }
+                               
+                               // Must finish merges before rolling back segmentInfos
+                               // so merges don't hit exceptions on trying to commit
+                               // themselves, don't get files deleted out from under
+                               // them, etc:
+                               FinishMerges(false);
+                               
+                               // Keep the same segmentInfos instance but replace all
+                               // of its SegmentInfo instances.  This is so the next
+                               // attempt to commit using this instance of IndexWriter
+                               // will always write to a new generation ("write once").
+                               segmentInfos.Clear();
+                               segmentInfos.AddRange(localRollbackSegmentInfos);
+                               localRollbackSegmentInfos = null;
+                               
+                               // This must come after we rollback segmentInfos, so
+                               // that if a commit() kicks off it does not see the
+                               // segmentInfos with external segments
+                               FinishAddIndexes();
+                               
+                               // Ask deleter to locate unreferenced files we had
+                               // created & remove them:
+                               deleter.Checkpoint(segmentInfos, false);
+                               
+                               if (!autoCommit)
+                               // Remove the incRef we did in startTransaction:
+                                       deleter.DecRef(segmentInfos);
+                               
+                               // Also ask deleter to remove any newly created files
+                               // that were never incref'd; this "garbage" is created
+                               // when a merge kicks off but aborts part way through
+                               // before it had a chance to incRef the files it had
+                               // partially created
+                               deleter.Refresh();
+                               
+                               System.Threading.Monitor.PulseAll(this);
+                               
+                               System.Diagnostics.Debug.Assert(!HasExternalSegments());
+                       }
+               }
+               
+               /*
+               * Commits the transaction.  This will write the new
+               * segments file and remove any pending deletions we have
+               * accumulated during the transaction.
+               */
+               private void  CommitTransaction()
+               {
+                       lock (this)
+                       {
+                               
+                               if (infoStream != null)
+                                       Message("now commit transaction");
+                               
+                               // First restore autoCommit in case we hit an exception below:
+                               autoCommit = localAutoCommit;
+                               
+                               // Give deleter a chance to remove files now:
+                               Checkpoint();
+                               
+                               if (autoCommit)
+                               {
+                                       bool success = false;
+                                       try
+                                       {
+                                               Commit(0);
+                                               success = true;
+                                       }
+                                       finally
+                                       {
+                                               if (!success)
+                                               {
+                                                       if (infoStream != null)
+                                                               Message("hit exception committing transaction");
+                                                       RollbackTransaction();
+                                               }
+                                       }
+                               }
+                               // Remove the incRef we did in startTransaction.
+                               else
+                                       deleter.DecRef(localRollbackSegmentInfos);
+                               
+                               localRollbackSegmentInfos = null;
+                               
+                               System.Diagnostics.Debug.Assert(!HasExternalSegments());
+                               
+                               FinishAddIndexes();
+                       }
+               }
+               
+               /// <deprecated> Please use {@link #rollback} instead.
+               /// </deprecated>
+        [Obsolete("Please use Rollback instead.")]
+               public virtual void  Abort()
+               {
+                       Rollback();
+               }
+               
+               /// <summary> Close the <code>IndexWriter</code> without committing
+               /// any changes that have occurred since the last commit
+               /// (or since it was opened, if commit hasn't been called).
+               /// This removes any temporary files that had been created,
+               /// after which the state of the index will be the same as
+               /// it was when commit() was last called or when this
+               /// writer was first opened.  This can only be called when
+               /// this IndexWriter was opened with
+               /// <code>autoCommit=false</code>.  This also clears a
+               /// previous call to {@link #prepareCommit}.
+               /// </summary>
+               /// <throws>  IllegalStateException if this is called when the writer was opened with <code>autoCommit=true</code> </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
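+               /// <example>
+               /// A minimal sketch (illustrative; <c>writer</c> was opened with
+               /// <code>autoCommit=false</code>, as this method requires):
+               /// <code>
+               /// try
+               /// {
+               ///     // ... add or delete documents ...
+               /// }
+               /// catch (System.Exception)
+               /// {
+               ///     writer.Rollback(); // discard everything since the last commit
+               ///     throw;
+               /// }
+               /// </code>
+               /// </example>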
+               public virtual void  Rollback()
+               {
+                       EnsureOpen();
+                       if (autoCommit)
+                               throw new System.SystemException("rollback() can only be called when IndexWriter was opened with autoCommit=false");
+                       
+                       // Ensure that only one thread actually gets to do the closing:
+                       if (ShouldClose())
+                               RollbackInternal();
+               }
+               
+               private void  RollbackInternal()
+               {
+                       
+                       bool success = false;
+
+                       if (infoStream != null)
+                               Message("rollback");
+                       
+                       docWriter.PauseAllThreads();
+                       
+                       try
+                       {
+                               FinishMerges(false);
+                               
+                               // Must pre-close these two, in case they increment
+                               // changeCount so that we can then set it to false
+                               // before calling closeInternal
+                               mergePolicy.Close();
+                               mergeScheduler.Close();
+                               
+                               lock (this)
+                               {
+                                       
+                                       if (pendingCommit != null)
+                                       {
+                                               pendingCommit.RollbackCommit(directory);
+                                               deleter.DecRef(pendingCommit);
+                                               pendingCommit = null;
+                                               System.Threading.Monitor.PulseAll(this);
+                                       }
+                                       
+                                       // Keep the same segmentInfos instance but replace all
+                                       // of its SegmentInfo instances.  This is so the next
+                                       // attempt to commit using this instance of IndexWriter
+                                       // will always write to a new generation ("write
+                                       // once").
+                                       segmentInfos.Clear();
+                                       segmentInfos.AddRange(rollbackSegmentInfos);
+                                       
+                                       System.Diagnostics.Debug.Assert(!HasExternalSegments());
+                                       
+                                       docWriter.Abort();
+                                       
+                                       System.Diagnostics.Debug.Assert(TestPoint("rollback before checkpoint"));
+                                       
+                                       // Ask deleter to locate unreferenced files & remove
+                                       // them:
+                                       deleter.Checkpoint(segmentInfos, false);
+                                       deleter.Refresh();
+                               }
+                               
+                               // Don't bother saving any changes in our segmentInfos
+                               readerPool.Clear(null);
+                               
+                               lastCommitChangeCount = changeCount;
+                               
+                               success = true;
+                       }
+                       catch (System.OutOfMemoryException oom)
+                       {
+                               HandleOOM(oom, "rollbackInternal");
+                       }
+                       finally
+                       {
+                               lock (this)
+                               {
+                                       if (!success)
+                                       {
+                                               docWriter.ResumeAllThreads();
+                                               closing = false;
+                                               System.Threading.Monitor.PulseAll(this);
+                                               if (infoStream != null)
+                                                       Message("hit exception during rollback");
+                                       }
+                               }
+                       }
+                       
+                       CloseInternal(false);
+               }
+               
+               /// <summary> Delete all documents in the index.
+               /// 
+               /// <p/>This method will drop all buffered documents and will 
+               /// remove all segments from the index. This change will not be
+               /// visible until a {@link #Commit()} has been called. This method
+               /// can be rolled back using {@link #Rollback()}.<p/>
+               /// 
+               /// <p/>NOTE: this method is much faster than using deleteDocuments( new MatchAllDocsQuery() ).<p/>
+               /// 
+               /// <p/>NOTE: this method will forcefully abort all merges
+               /// in progress.  If other threads are running {@link
+               /// #Optimize()} or any of the addIndexes methods, they
+               /// will receive {@link MergePolicy.MergeAbortedException}s.
+               /// </summary>
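+               /// <example>
+               /// A minimal sketch (illustrative; <c>writer</c> is an already-open IndexWriter):
+               /// <code>
+               /// writer.DeleteAll(); // drop all segments and buffered documents
+               /// writer.Commit();    // make the now-empty index visible to readers
+               /// </code>
+               /// </example>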
+               public virtual void  DeleteAll()
+               {
+                       lock (this)
+                       {
+                               bool success = false;
+                               docWriter.PauseAllThreads();
+                               try
+                               {
+                                       
+                                       // Abort any running merges
+                                       FinishMerges(false);
+                                       
+                                       // Remove any buffered docs
+                                       docWriter.Abort();
+                                       docWriter.SetFlushedDocCount(0);
+                                       
+                                       // Remove all segments
+                                       segmentInfos.Clear();
+                                       
+                                       // Ask deleter to locate unreferenced files & remove them:
+                                       deleter.Checkpoint(segmentInfos, false);
+                                       deleter.Refresh();
+                                       
+                                       // Don't bother saving any changes in our segmentInfos
+                                       readerPool.Clear(null);
+                                       
+                                       // Mark that the index has changed
+                                       ++changeCount;
+                                       
+                                       success = true;
+                               }
+                               catch (System.OutOfMemoryException oom)
+                               {
+                                       HandleOOM(oom, "deleteAll");
+                               }
+                               finally
+                               {
+                                       docWriter.ResumeAllThreads();
+                                       if (!success && infoStream != null)
+                                       {
+                                               Message("hit exception during deleteAll");
+                                       }
+                               }
+                       }
+               }
+               
+               private void  FinishMerges(bool waitForMerges)
+               {
+                       lock (this)
+                       {
+                               if (!waitForMerges)
+                               {
+                                       
+                                       stopMerges = true;
+                                       
+                                       // Abort all pending & running merges:
+                                       System.Collections.IEnumerator it = pendingMerges.GetEnumerator();
+                                       while (it.MoveNext())
+                                       {
+                                               MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.Current;
+                                               if (infoStream != null)
+                                                       Message("now abort pending merge " + merge.SegString(directory));
+                                               merge.Abort();
+                                               MergeFinish(merge);
+                                       }
+                                       pendingMerges.Clear();
+                                       
+                                       it = runningMerges.GetEnumerator();
+                                       while (it.MoveNext())
+                                       {
+                                               MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.Current;
+                                               if (infoStream != null)
+                                                       Message("now abort running merge " + merge.SegString(directory));
+                                               merge.Abort();
+                                       }
+                                       
+                                       // Ensure any running addIndexes finishes.  It's fine
+                                       // if a new one attempts to start because its merges
+                                       // will quickly see stopMerges == true and abort.
+                                       AcquireRead();
+                                       ReleaseRead();
+                                       
+                                       // These merges periodically check whether they have
+                                       // been aborted, and stop if so.  We wait here to make
+                                       // sure they all stop; since the merge threads check
+                                       // frequently, this should not take very long.
+                                       while (runningMerges.Count > 0)
+                                       {
+                                               if (infoStream != null)
+                                               Message("now wait for " + runningMerges.Count + " running merge(s) to abort");
+                                               DoWait();
+                                       }
+                                       
+                                       stopMerges = false;
+                                       System.Threading.Monitor.PulseAll(this);
+                                       
+                                       System.Diagnostics.Debug.Assert(0 == mergingSegments.Count);
+                                       
+                                       if (infoStream != null)
+                                               Message("all running merges have aborted");
+                               }
+                               else
+                               {
+                                       // waitForMerges() will ensure any running addIndexes finishes.
+                                       // It's fine if a new one attempts to start because, from
+                                       // our caller above, it will see that we are in the
+                                       // process of closing and will throw an
+                                       // AlreadyClosedException.
+                                       WaitForMerges();
+                               }
+                       }
+               }
+               
+               /// <summary> Wait for any currently outstanding merges to finish.
+               /// 
+               /// <p/>It is guaranteed that any merges started prior to calling this method 
+               /// will have completed once this method completes.<p/>
+               /// </summary>
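+               /// <example>
+               /// A minimal sketch (illustrative; <c>writer</c> is an already-open IndexWriter):
+               /// <code>
+               /// writer.MaybeMerge();    // request any merges the policy deems necessary
+               /// writer.WaitForMerges(); // block until all pending/running merges complete
+               /// </code>
+               /// </example>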
+               public virtual void  WaitForMerges()
+               {
+                       lock (this)
+                       {
+                               // Ensure any running addIndexes finishes.
+                               AcquireRead();
+                               ReleaseRead();
+                               
+                               while (pendingMerges.Count > 0 || runningMerges.Count > 0)
+                               {
+                                       DoWait();
+                               }
+                               
+                               // sanity check
+                               System.Diagnostics.Debug.Assert(0 == mergingSegments.Count);
+                       }
+               }
+               
+               /*
+               * Called whenever the SegmentInfos has been updated and
+               * the index files referenced exist (correctly) in the
+               * index directory.
+               */
+               private void  Checkpoint()
+               {
+                       lock (this)
+                       {
+                               changeCount++;
+                               deleter.Checkpoint(segmentInfos, false);
+                       }
+               }
+               
+               private void  FinishAddIndexes()
+               {
+                       ReleaseWrite();
+               }
+               
+               private void  BlockAddIndexes(bool includePendingClose)
+               {
+                       
+                       AcquireRead();
+                       
+                       bool success = false;
+                       try
+                       {
+                               
+                               // Make sure we are still open since we could have
+                               // waited quite a while for last addIndexes to finish
+                               EnsureOpen(includePendingClose);
+                               success = true;
+                       }
+                       finally
+                       {
+                               if (!success)
+                                       ReleaseRead();
+                       }
+               }
+               
+               private void  ResumeAddIndexes()
+               {
+                       ReleaseRead();
+               }
+               
+               /// <summary>Merges all segments from an array of indexes into this index.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <deprecated> Use {@link #addIndexesNoOptimize} instead,
+               /// then separately call {@link #optimize} afterwards if
+               /// you need to.
+               /// 
+               /// </deprecated>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
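+               /// <example>
+               /// Illustrative migration per the deprecation note (<c>writer</c> is an
+               /// already-open IndexWriter, <c>dirs</c> the input directories):
+               /// <code>
+               /// writer.AddIndexesNoOptimize(dirs);
+               /// writer.Optimize(); // only if you still need a single-segment index
+               /// </code>
+               /// </example>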
+        [Obsolete("Use {@link #addIndexesNoOptimize} instead, then separately call {@link #optimize} afterwards if you need to.")]
+               public virtual void  AddIndexes(Directory[] dirs)
+               {
+                       
+                       EnsureOpen();
+                       
+                       NoDupDirs(dirs);
+                       
+                       // Do not allow add docs or deletes while we are running:
+                       docWriter.PauseAllThreads();
+                       
+                       try
+                       {
+                               
+                               if (infoStream != null)
+                                       Message("flush at addIndexes");
+                               Flush(true, false, true);
+                               
+                               bool success = false;
+                               
+                               StartTransaction(false);
+                               
+                               try
+                               {
+                                       
+                                       int docCount = 0;
+                                       lock (this)
+                                       {
+                                               EnsureOpen();
+                                               for (int i = 0; i < dirs.Length; i++)
+                                               {
+                                                       SegmentInfos sis = new SegmentInfos(); // read infos from dir
+                                                       sis.Read(dirs[i]);
+                                                       for (int j = 0; j < sis.Count; j++)
+                                                       {
+                                                               SegmentInfo info = sis.Info(j);
+                                                               docCount += info.docCount;
+                                                               System.Diagnostics.Debug.Assert(!segmentInfos.Contains(info));
+                                                               segmentInfos.Add(info); // add each info
+                                                       }
+                                               }
+                                       }
+                                       
+                                       // Notify DocumentsWriter that the flushed count just increased
+                                       docWriter.UpdateFlushedDocCount(docCount);
+                                       
+                                       Optimize();
+                                       
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       if (success)
+                                       {
+                                               CommitTransaction();
+                                       }
+                                       else
+                                       {
+                                               RollbackTransaction();
+                                       }
+                               }
+                       }
+                       catch (System.OutOfMemoryException oom)
+                       {
+                               HandleOOM(oom, "addIndexes(Directory[])");
+                       }
+                       finally
+                       {
+                               if (docWriter != null)
+                               {
+                                       docWriter.ResumeAllThreads();
+                               }
+                       }
+               }
+               
+               private void  ResetMergeExceptions()
+               {
+                       lock (this)
+                       {
+                               mergeExceptions = new System.Collections.ArrayList();
+                               mergeGen++;
+                       }
+               }
+               
+               private void  NoDupDirs(Directory[] dirs)
+               {
+                       System.Collections.Generic.Dictionary<Directory, Directory> dups = new System.Collections.Generic.Dictionary<Directory, Directory>();
+                       for (int i = 0; i < dirs.Length; i++)
+                       {
+                               if (dups.ContainsKey(dirs[i]))
+                               {
+                                       throw new System.ArgumentException("Directory " + dirs[i] + " appears more than once");
+                               }
+                               if (dirs[i] == directory)
+                                       throw new System.ArgumentException("Cannot add directory to itself");
+                               dups[dirs[i]] = dirs[i];
+                       }
+               }
+               
+               /// <summary> Merges all segments from an array of indexes into this
+               /// index.
+               /// 
+               /// <p/>This may be used to parallelize batch indexing.  A large document
+               /// collection can be broken into sub-collections.  Each sub-collection can be
+               /// indexed in parallel, on a different thread, process or machine.  The
+               /// complete index can then be created by merging sub-collection indexes
+               /// with this method.
+               /// 
+               /// <p/><b>NOTE:</b> the index in each Directory must not be
+               /// changed (opened by a writer) while this method is
+               /// running.  This method does not acquire a write lock in
+               /// each input Directory, so it is up to the caller to
+               /// enforce this.
+               /// 
+               /// <p/><b>NOTE:</b> while this is running, any attempts to
+               /// add or delete documents (with another thread) will be
+               /// paused until this method completes.
+               /// 
+               /// <p/>This method is transactional in how Exceptions are
+               /// handled: it does not commit a new segments_N file until
+               /// all indexes are added.  This means if an Exception
+               /// occurs (for example disk full), then either no indexes
+               /// will have been added or they all will have been.<p/>
+               /// 
+               /// <p/>Note that this requires temporary free space in the
+               /// Directory up to 2X the sum of all input indexes
+               /// (including the starting index).  If readers/searchers
+               /// are open against the starting index, then temporary
+               /// free space required will be higher by the size of the
+               /// starting index (see {@link #Optimize()} for details).
+               /// <p/>
+               /// 
+               /// <p/>Once this completes, the final size of the index
+               /// will be less than the sum of all input index sizes
+               /// (including the starting index).  It could be quite a
+               /// bit smaller (if there were many pending deletes) or
+               /// just slightly smaller.<p/>
+               /// 
+               /// <p/>
+               /// This requires that this index not be among those to be added.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
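+               /// <example>
+               /// A minimal sketch (illustrative; <c>writer</c> is an already-open IndexWriter
+               /// and <c>dir1</c>/<c>dir2</c> hold sub-indexes built elsewhere, e.g. in parallel):
+               /// <code>
+               /// writer.AddIndexesNoOptimize(new Directory[] { dir1, dir2 });
+               /// writer.Commit();
+               /// </code>
+               /// </example>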
+               public virtual void  AddIndexesNoOptimize(Directory[] dirs)
+               {
+                       
+                       EnsureOpen();
+                       
+                       NoDupDirs(dirs);
+                       
+                       // Do not allow add docs or deletes while we are running:
+                       docWriter.PauseAllThreads();
+                       
+                       try
+                       {
+                               if (infoStream != null)
+                                       Message("flush at addIndexesNoOptimize");
+                               Flush(true, false, true);
+                               
+                               bool success = false;
+                               
+                               StartTransaction(false);
+                               
+                               try
+                               {
+                                       
+                                       int docCount = 0;
+                                       lock (this)
+                                       {
+                                               EnsureOpen();
+                                               
+                                               for (int i = 0; i < dirs.Length; i++)
+                                               {
+                                                       if (directory == dirs[i])
+                                                       {
+                                                               // cannot add this index: segments may be deleted in merge before added
+                                                               throw new System.ArgumentException("Cannot add this index to itself");
+                                                       }
+                                                       
+                                                       SegmentInfos sis = new SegmentInfos(); // read infos from dir
+                                                       sis.Read(dirs[i]);
+                                                       for (int j = 0; j < sis.Count; j++)
+                                                       {
+                                                               SegmentInfo info = sis.Info(j);
+                                                               System.Diagnostics.Debug.Assert(!segmentInfos.Contains(info), "dup info dir=" + info.dir + " name=" + info.name);
+                                                               docCount += info.docCount;
+                                                               segmentInfos.Add(info); // add each info
+                                                       }
+                                               }
+                                       }
+                                       
+                                       // Notify DocumentsWriter that the flushed count just increased
+                                       docWriter.UpdateFlushedDocCount(docCount);
+                                       
+                                       MaybeMerge();
+                                       
+                                       EnsureOpen();
+                                       
+                                       // If after merging there remain segments in the index
+                                       // that are in a different directory, just copy these
+                                       // over into our index.  This is necessary (before
+                                       // finishing the transaction) to avoid leaving the
+                                       // index in an unusable (inconsistent) state.
+                                       ResolveExternalSegments();
+                                       
+                                       EnsureOpen();
+                                       
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       if (success)
+                                       {
+                                               CommitTransaction();
+                                       }
+                                       else
+                                       {
+                                               RollbackTransaction();
+                                       }
+                               }
+                       }
+                       catch (System.OutOfMemoryException oom)
+                       {
+                               HandleOOM(oom, "addIndexesNoOptimize");
+                       }
+                       finally
+                       {
+                               if (docWriter != null)
+                               {
+                                       docWriter.ResumeAllThreads();
+                               }
+                       }
+               }
+               
+               private bool HasExternalSegments()
+               {
+                       return segmentInfos.HasExternalSegments(directory);
+               }
+               
+               /* If any of our segments are using a directory != ours
+               * then we have to either copy them over one by one, merge
+               * them (if merge policy has chosen to) or wait until
+               * currently running merges (in the background) complete.
+               * We don't return until the SegmentInfos has no more
+               * external segments.  Currently this is only used by
+               * addIndexesNoOptimize(). */
+               private void  ResolveExternalSegments()
+               {
+                       
+                       bool any = false;
+                       
+                       bool done = false;
+                       
+                       while (!done)
+                       {
+                               SegmentInfo info = null;
+                               MergePolicy.OneMerge merge = null;
+                               lock (this)
+                               {
+                                       
+                                       if (stopMerges)
+                                               throw new MergePolicy.MergeAbortedException("rollback() was called or addIndexes* hit an unhandled exception");
+                                       
+                                       int numSegments = segmentInfos.Count;
+                                       
+                                       done = true;
+                                       for (int i = 0; i < numSegments; i++)
+                                       {
+                                               info = segmentInfos.Info(i);
+                                               if (info.dir != directory)
+                                               {
+                                                       done = false;
+                                                       MergePolicy.OneMerge newMerge = new MergePolicy.OneMerge(segmentInfos.Range(i, 1 + i), mergePolicy is LogMergePolicy && GetUseCompoundFile());
+                                                       
+                                                       // Returns true if no running merge conflicts
+                                                       // with this one (and, records this merge as
+                                                       // pending), ie, this segment is not currently
+                                                       // being merged:
+                                                       if (RegisterMerge(newMerge))
+                                                       {
+                                                               merge = newMerge;
+                                                               
+                                                               // If this segment is not currently being
+                                                               // merged, then advance it to running & run
+                                                               // the merge ourself (below):
+                                pendingMerges.Remove(merge);    // {{Aroush-2.9}} From Mike Garski: this is an O(n) op... is that an issue?
+                                                               runningMerges.Add(merge);
+                                                               break;
+                                                       }
+                                               }
+                                       }
+                                       
+                                       if (!done && merge == null)
+                                       // We are not yet done (external segments still
+                                       // exist in segmentInfos), yet, all such segments
+                                       // are currently "covered" by a pending or running
+                                       // merge.  We now try to grab any pending merge
+                                       // that involves external segments:
+                                               merge = GetNextExternalMerge();
+                                       
+                                       if (!done && merge == null)
+                                       // We are not yet done, and, all external segments
+                                       // fall under merges that the merge scheduler is
+                                       // currently running.  So, we now wait and check
+                                       // back to see if the merge has completed.
+                                               DoWait();
+                               }
+                               
+                               if (merge != null)
+                               {
+                                       any = true;
+                                       Merge(merge);
+                               }
+                       }
+                       
+                       if (any)
+                       // Sometimes, on copying an external segment over,
+                       // more merges may become necessary:
+                               mergeScheduler.Merge(this);
+               }
+               
+               /// <summary>Merges the provided indexes into this index.
+               /// <p/>After this completes, the index is optimized. <p/>
+               /// <p/>The provided IndexReaders are not closed.<p/>
+               /// 
+               /// <p/><b>NOTE:</b> while this is running, any attempts to
+               /// add or delete documents (with another thread) will be
+               /// paused until this method completes.
+               /// 
+               /// <p/>See {@link #AddIndexesNoOptimize(Directory[])} for
+               /// details on transactional semantics, temporary free
+               /// space required in the Directory, and non-CFS segments
+               /// on an Exception.<p/>
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  AddIndexes(IndexReader[] readers)
+               {
+                       
+                       EnsureOpen();
+                       
+                       // Do not allow add docs or deletes while we are running:
+                       docWriter.PauseAllThreads();
+                       
+                       // We must pre-acquire a read lock here (and upgrade to
+                       // write lock in startTransaction below) so that no
+                       // other addIndexes is allowed to start up after we have
+                       // flushed & optimized but before we then start our
+                       // transaction.  This is because the merging below
+                       // requires that only one segment is present in the
+                       // index:
+                       AcquireRead();
+                       
+                       try
+                       {
+                               
+                               SegmentInfo info = null;
+                               System.String mergedName = null;
+                               SegmentMerger merger = null;
+                               
+                               bool success = false;
+                               
+                               try
+                               {
+                                       Flush(true, false, true);
+                                       Optimize(); // start with zero or 1 seg
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       // Take care to release the read lock if we hit an
+                                       // exception before starting the transaction
+                                       if (!success)
+                                               ReleaseRead();
+                               }
+                               
+                               // true means we already have a read lock; if this
+                               // call hits an exception it will release the write
+                               // lock:
+                               StartTransaction(true);
+                               
+                               try
+                               {
+                                       mergedName = NewSegmentName();
+                                       merger = new SegmentMerger(this, mergedName, null);
+                                       
+                                       SegmentReader sReader = null;
+                                       lock (this)
+                                       {
+                                               if (segmentInfos.Count == 1)
+                                               {
+                                                       // add existing index, if any
+                                                       sReader = readerPool.Get(segmentInfos.Info(0), true, BufferedIndexInput.BUFFER_SIZE, - 1);
+                                               }
+                                       }
+                                       
+                                       success = false;
+                                       
+                                       try
+                                       {
+                                               if (sReader != null)
+                                                       merger.Add(sReader);
+                                               
+                                               for (int i = 0; i < readers.Length; i++)
+                                               // add new indexes
+                                                       merger.Add(readers[i]);
+                                               
+                                               int docCount = merger.Merge(); // merge 'em
+                                               
+                                               lock (this)
+                                               {
+                                                       segmentInfos.Clear(); // pop old infos & add new
+                                                       info = new SegmentInfo(mergedName, docCount, directory, false, true, - 1, null, false, merger.HasProx());
+                                                       SetDiagnostics(info, "addIndexes(IndexReader[])");
+                                                       segmentInfos.Add(info);
+                                               }
+                                               
+                                               // Notify DocumentsWriter that the flushed count just increased
+                                               docWriter.UpdateFlushedDocCount(docCount);
+                                               
+                                               success = true;
+                                       }
+                                       finally
+                                       {
+                                               if (sReader != null)
+                                               {
+                                                       readerPool.Release(sReader);
+                                               }
+                                       }
+                               }
+                               finally
+                               {
+                                       if (!success)
+                                       {
+                                               if (infoStream != null)
+                                                       Message("hit exception in addIndexes during merge");
+                                               RollbackTransaction();
+                                       }
+                                       else
+                                       {
+                                               CommitTransaction();
+                                       }
+                               }
+                               
+                               if (mergePolicy is LogMergePolicy && GetUseCompoundFile())
+                               {
+                                       
+                                       System.Collections.Generic.IList<string> files = null;
+                                       
+                                       lock (this)
+                                       {
+                                               // Must incRef our files so that if another thread
+                                               // is running merge/optimize, it doesn't delete our
+                                               // segment's files before we have a chance to
+                                               // finish making the compound file.
+                                               if (segmentInfos.Contains(info))
+                                               {
+                                                       files = info.Files();
+                                                       deleter.IncRef(files);
+                                               }
+                                       }
+                                       
+                                       if (files != null)
+                                       {
+                                               
+                                               success = false;
+                                               
+                                               StartTransaction(false);
+                                               
+                                               try
+                                               {
+                                                       merger.CreateCompoundFile(mergedName + ".cfs");
+                                                       lock (this)
+                                                       {
+                                                               info.SetUseCompoundFile(true);
+                                                       }
+                                                       
+                                                       success = true;
+                                               }
+                                               finally
+                                               {
+                                                       lock (this)
+                                                       {
+                                                               deleter.DecRef(files);
+                                                       }
+                                                       
+                                                       if (!success)
+                                                       {
+                                                               if (infoStream != null)
+                                                                       Message("hit exception building compound file in addIndexes during merge");
+                                                               
+                                                               RollbackTransaction();
+                                                       }
+                                                       else
+                                                       {
+                                                               CommitTransaction();
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+                       catch (System.OutOfMemoryException oom)
+                       {
+                               HandleOOM(oom, "addIndexes(IndexReader[])");
+                       }
+                       finally
+                       {
+                               if (docWriter != null)
+                               {
+                                       docWriter.ResumeAllThreads();
+                               }
+                       }
+               }
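+               // Editorial sketch (not part of the original port): typical usage of
+               // AddIndexes(IndexReader[]), assuming `dir`, `otherDir1`, `otherDir2`
+               // and `analyzer` already exist; the readers stay open afterwards
+               // because this method does not close them:
+               //
+               //   IndexWriter writer = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.UNLIMITED);
+               //   IndexReader r1 = IndexReader.Open(otherDir1, true);
+               //   IndexReader r2 = IndexReader.Open(otherDir2, true);
+               //   try
+               //   {
+               //       writer.AddIndexes(new IndexReader[] { r1, r2 });
+               //   }
+               //   finally
+               //   {
+               //       r1.Close();
+               //       r2.Close();
+               //   }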
+
+               /// <summary>
+               /// A hook for extending classes to execute operations after pending added and
+               /// deleted documents have been flushed to the Directory but before the change
+               /// is committed (new segments_N file written).
+               /// </summary>
+               protected  virtual void  DoAfterFlush()
+               {
+               }
+               
+               /// <summary> Flush all in-memory buffered updates (adds and deletes)
+               /// to the Directory. 
+               /// <p/>Note: while this will force buffered docs to be
+               /// pushed into the index, it will not make these docs
+               /// visible to a reader.  Use {@link #Commit()} instead
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <deprecated> please call {@link #Commit()} instead
+               /// 
+               /// </deprecated>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+        [Obsolete("please call Commit() instead")]
+               public void  Flush()
+               {
+                       if (hitOOM)
+                       {
+                               throw new System.SystemException("this writer hit an OutOfMemoryError; cannot flush");
+                       }
+                       
+                       Flush(true, false, true);
+               }
+
+               /// <summary>
+               /// A hook for extending classes to execute operations before pending added and
+               /// deleted documents are flushed to the Directory.
+               /// </summary>
+               protected virtual void DoBeforeFlush()
+               {
+               }
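+               // Editorial sketch (not part of the original port): a hypothetical
+               // subclass using the two hooks above, e.g. for logging; constructors
+               // are omitted for brevity:
+               //
+               //   public class LoggingIndexWriter : IndexWriter
+               //   {
+               //       protected override void DoBeforeFlush()
+               //       {
+               //           System.Console.WriteLine("flush starting");
+               //       }
+               //
+               //       protected override void DoAfterFlush()
+               //       {
+               //           System.Console.WriteLine("flushed to Directory (not yet committed)");
+               //       }
+               //   }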
+               
+               /// <summary>Expert: prepare for commit.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <seealso cref="PrepareCommit(Map)">
+               /// </seealso>
+               public void  PrepareCommit()
+               {
+                       EnsureOpen();
+                       PrepareCommit(null);
+               }
+               
+               /// <summary><p/>Expert: prepare for commit, specifying
+               /// commitUserData Map (String -> String).  This does the
+               /// first phase of 2-phase commit.  You can only call this
+               /// when autoCommit is false.  This method does all steps
+               /// necessary to commit changes since this writer was
+               /// opened: flushes pending added and deleted docs, syncs
+               /// the index files, writes most of next segments_N file.
+               /// After calling this you must call either {@link
+               /// #Commit()} to finish the commit, or {@link
+               /// #Rollback()} to revert the commit and undo all changes
+               /// done since the writer was opened.<p/>
+               /// 
+               /// You can also just call {@link #Commit(Map)} directly
+               /// without prepareCommit first in which case that method
+               /// will internally call prepareCommit.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <param name="commitUserData">Opaque Map (String->String)
+               /// that's recorded into the segments file in the index,
+               /// and retrievable by {@link
+               /// IndexReader#getCommitUserData}.  Note that when
+               /// IndexWriter commits itself, for example if open with
+               /// autoCommit=true, or, during {@link #close}, the
+               /// commitUserData is unchanged (just carried over from
+               /// the prior commit).  If this is null then the previous
+               /// commitUserData is kept.  Also, the commitUserData will
+               /// only "stick" if there are actually changes in the
+               /// index to commit.  Therefore it's best to use this
+               /// feature only when autoCommit is false.
+               /// </param>
+        public void PrepareCommit(System.Collections.Generic.IDictionary<string, string> commitUserData)
+               {
+                       PrepareCommit(commitUserData, false);
+               }
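+               // Editorial sketch (not part of the original port): the two-phase
+               // commit pattern described above, assuming the writer was opened
+               // with autoCommit=false:
+               //
+               //   writer.PrepareCommit();  // phase 1: flush, sync, write most of segments_N
+               //   try
+               //   {
+               //       writer.Commit();     // phase 2: publish the new segments_N
+               //   }
+               //   catch (System.Exception)
+               //   {
+               //       writer.Rollback();   // discard all changes since the writer was opened
+               //       throw;
+               //   }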
+
+        private void PrepareCommit(System.Collections.Generic.IDictionary<string, string> commitUserData, bool internal_Renamed)
+               {
+                       
+                       if (hitOOM)
+                       {
+                               throw new System.SystemException("this writer hit an OutOfMemoryError; cannot commit");
+                       }
+                       
+                       if (autoCommit && !internal_Renamed)
+                               throw new System.SystemException("this method can only be used when autoCommit is false");
+                       
+                       if (!autoCommit && pendingCommit != null)
+                               throw new System.SystemException("prepareCommit was already called with no corresponding call to commit");
+                       
+                       if (infoStream != null)
+                               Message("prepareCommit: flush");
+                       
+                       Flush(true, true, true);
+                       
+                       StartCommit(0, commitUserData);
+               }
+               
+        // Used only by commit, below; lock order is commitLock -> IW
+        private Object commitLock = new Object();
+
+               private void  Commit(long sizeInBytes)
+               {
+            lock(commitLock) {
+                StartCommit(sizeInBytes, null);
+                FinishCommit();
+            }
+               }
+               
+               /// <summary> <p/>Commits all pending changes (added &amp; deleted
+               /// documents, optimizations, segment merges, added
+               /// indexes, etc.) to the index, and syncs all referenced
+               /// index files, such that a reader will see the changes
+               /// and the index updates will survive an OS or machine
+               /// crash or power loss.  Note that this does not wait for
+               /// any running background merges to finish.  This may be a
+               /// costly operation, so you should test the cost in your
+               /// application and do it only when really necessary.<p/>
+               /// 
+               /// <p/> Note that this operation calls Directory.sync on
+               /// the index files.  That call should not return until the
+               /// file contents &amp; metadata are on stable storage.  For
+               /// FSDirectory, this calls the OS's fsync.  But, beware:
+               /// some hardware devices may in fact cache writes even
+               /// during fsync, and return before the bits are actually
+               /// on stable storage, to give the appearance of faster
+               /// performance.  If you have such a device, and it does
+               /// not have a battery backup (for example) then on power
+               /// loss it may still lose data.  Lucene cannot guarantee
+               /// consistency on such devices.  <p/>
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// 
+               /// </summary>
+               /// <seealso cref="prepareCommit">
+               /// </seealso>
+               /// <seealso cref="Commit(Map)">
+               /// </seealso>
+               public void  Commit()
+               {
+                       Commit(null);
+               }
+               
+               /// <summary>Commits all changes to the index, specifying a
+               /// commitUserData Map (String -> String).  This just
+               /// calls {@link #PrepareCommit(Map)} (if you didn't
+               /// already call it) and then {@link #finishCommit}.
+               /// 
+               /// <p/><b>NOTE</b>: if this method hits an OutOfMemoryError
+               /// you should immediately close the writer.  See <a
+               /// href="#OOME">above</a> for details.<p/>
+               /// </summary>
+        public void Commit(System.Collections.Generic.IDictionary<string, string> commitUserData)
+               {
+                       
+                       EnsureOpen();
+
+            if (infoStream != null)
+            {
+                Message("commit: start");
+            }
+
+            lock (commitLock)
+            {
+                if (infoStream != null)
+                {
+                    Message("commit: enter lock");
+                }
+                if (autoCommit || pendingCommit == null)
+                {
+                    if (infoStream != null)
+                        Message("commit: now prepare");
+                    PrepareCommit(commitUserData, true);
+                }
+                else if (infoStream != null)
+                {
+                    Message("commit: already prepared");
+                }
+
+                FinishCommit();
+            }
+               }
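+               // Editorial sketch (not part of the original port): committing with
+               // an opaque commitUserData map; the `indexedAt` key is purely
+               // illustrative:
+               //
+               //   System.Collections.Generic.IDictionary<string, string> userData =
+               //       new System.Collections.Generic.Dictionary<string, string>();
+               //   userData["indexedAt"] = System.DateTime.UtcNow.ToString("o");
+               //   writer.Commit(userData);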
+               
+               private void  FinishCommit()
+               {
+                       lock (this)
+                       {
+                               
+                               if (pendingCommit != null)
+                               {
+                                       try
+                                       {
+                                               if (infoStream != null)
+                                                       Message("commit: pendingCommit != null");
+                                               pendingCommit.FinishCommit(directory);
+                                               if (infoStream != null)
+                                                       Message("commit: wrote segments file \"" + pendingCommit.GetCurrentSegmentFileName() + "\"");
+                                               lastCommitChangeCount = pendingCommitChangeCount;
+                                               segmentInfos.UpdateGeneration(pendingCommit);
+                                               segmentInfos.SetUserData(pendingCommit.GetUserData());
+                                               SetRollbackSegmentInfos(pendingCommit);
+                                               deleter.Checkpoint(pendingCommit, true);
+                                       }
+                                       finally
+                                       {
+                                               deleter.DecRef(pendingCommit);
+                                               pendingCommit = null;
+                                               System.Threading.Monitor.PulseAll(this);
+                                       }
+                               }
+                else if (infoStream != null)
+                {
+                    Message("commit: pendingCommit == null; skip");
+                }
+
+                if (infoStream != null)
+                {
+                    Message("commit: done");
+                }
+                       }
+               }
+               
+               /// <summary> Flush all in-memory buffered updates (adds and deletes)
+               /// to the Directory.
+               /// </summary>
+               /// <param name="triggerMerge">if true, we may merge segments (if
+               /// deletes or docs were flushed) if necessary
+               /// </param>
+               /// <param name="flushDocStores">if false we are allowed to keep
+               /// doc stores open to share with the next segment
+               /// </param>
+               /// <param name="flushDeletes">whether pending deletes should also
+               /// be flushed
+               /// </param>
+               public /*protected internal*/ void  Flush(bool triggerMerge, bool flushDocStores, bool flushDeletes)
+               {
+                       // We can be called during close, when closing==true, so we must pass false to ensureOpen:
+                       EnsureOpen(false);
+                       if (DoFlush(flushDocStores, flushDeletes) && triggerMerge)
+                               MaybeMerge();
+               }
+               
+               // TODO: this method should not have to be entirely
+               // synchronized, ie, merges should be allowed to commit
+               // even while a flush is happening
+               private bool DoFlush(bool flushDocStores, bool flushDeletes)
+               {
+                       lock (this)
+                       {
+                               try
+                               {
+                                       return DoFlushInternal(flushDocStores, flushDeletes);
+                               }
+                               finally
+                               {
+                    if (docWriter.DoBalanceRAM())
+                    {
+                        docWriter.BalanceRAM();
+                    }
+                                       docWriter.ClearFlushPending();
+                               }
+                       }
+               }
+               
+               // TODO: this method should not have to be entirely
+               // synchronized, ie, merges should be allowed to commit
+               // even while a flush is happening
+               private bool DoFlushInternal(bool flushDocStores, bool flushDeletes)
+               {
+                       lock (this)
+                       {
+                               
+                               if (hitOOM)
+                               {
+                                       throw new System.SystemException("this writer hit an OutOfMemoryError; cannot flush");
+                               }
+                               
+                               EnsureOpen(false);
+                               
+                               System.Diagnostics.Debug.Assert(TestPoint("startDoFlush"));
+
+                DoBeforeFlush();
+                               
+                               flushCount++;
+                               
+                               // If we are flushing because too many deletes
+                               // accumulated, then we should apply the deletes to free
+                               // RAM:
+                               flushDeletes |= docWriter.DoApplyDeletes();
+                               
+                               // When autoCommit=true we must always flush deletes
+                               // when flushing a segment; otherwise deletes may become
+                               // visible before their corresponding added document
+                               // from an updateDocument call
+                               flushDeletes |= autoCommit;
+                               
+                               // Make sure no threads are actively adding a document.
+                               // Returns true if docWriter is currently aborting, in
+                               // which case we skip flushing this segment
+                if (infoStream != null)
+                {
+                    Message("flush: now pause all indexing threads");
+                }
+                               if (docWriter.PauseAllThreads())
+                               {
+                                       docWriter.ResumeAllThreads();
+                                       return false;
+                               }
+                               
+                               try
+                               {
+                                       
+                                       SegmentInfo newSegment = null;
+                                       
+                                       int numDocs = docWriter.GetNumDocsInRAM();
+                                       
+                                       // Always flush docs if there are any
+                                       bool flushDocs = numDocs > 0;
+                                       
+                                       // With autoCommit=true we always must flush the doc
+                                       // stores when we flush
+                                       flushDocStores |= autoCommit;
+                                       System.String docStoreSegment = docWriter.GetDocStoreSegment();
+                                       
+                                       System.Diagnostics.Debug.Assert(docStoreSegment != null || numDocs == 0);
+                                       
+                                       if (docStoreSegment == null)
+                                               flushDocStores = false;
+                                       
+                                       int docStoreOffset = docWriter.GetDocStoreOffset();
+                                       
+                                       // docStoreOffset should only be non-zero when
+                                       // autoCommit == false
+                                       System.Diagnostics.Debug.Assert(!autoCommit || 0 == docStoreOffset);
+                                       
+                                       bool docStoreIsCompoundFile = false;
+                                       
+                                       if (infoStream != null)
+                                       {
+                                               Message("  flush: segment=" + docWriter.GetSegment() + " docStoreSegment=" + docWriter.GetDocStoreSegment() + " docStoreOffset=" + docStoreOffset + " flushDocs=" + flushDocs + " flushDeletes=" + flushDeletes + " flushDocStores=" + flushDocStores + " numDocs=" + numDocs + " numBufDelTerms=" + docWriter.GetNumBufferedDeleteTerms());
+                                               Message("  index before flush " + SegString());
+                                       }
+                                       
+                                       // Check if the doc stores must be separately flushed
+                                       // because other segments, besides the one we are about
+                                       // to flush, reference it
+                                       if (flushDocStores && (!flushDocs || !docWriter.GetSegment().Equals(docWriter.GetDocStoreSegment())))
+                                       {
+                                               // We must separately flush the doc store
+                                               if (infoStream != null)
+                                                       Message("  flush shared docStore segment " + docStoreSegment);
+                                               
+                                               docStoreIsCompoundFile = FlushDocStores();
+                                               flushDocStores = false;
+                                       }
+                                       
+                                       System.String segment = docWriter.GetSegment();
+                                       
+                                       // If we are flushing docs, segment must not be null:
+                                       System.Diagnostics.Debug.Assert(segment != null || !flushDocs);
+                                       
+                                       if (flushDocs)
+                                       {
+                                               
+                                               bool success = false;
+                                               int flushedDocCount;
+                                               
+                                               try
+                                               {
+                                                       flushedDocCount = docWriter.Flush(flushDocStores);
+                            if (infoStream != null)
+                            {
+                                Message("flushedFiles=" + docWriter.GetFlushedFiles());
+                            }
+                                                       success = true;
+                                               }
+                                               finally
+                                               {
+                                                       if (!success)
+                                                       {
+                                                               if (infoStream != null)
+                                                                       Message("hit exception flushing segment " + segment);
+                                                               deleter.Refresh(segment);
+                                                       }
+                                               }
+                                               
+                                               if (0 == docStoreOffset && flushDocStores)
+                                               {
+                                                       // This means we are flushing private doc stores
+                                                       // with this segment, so it will not be shared
+                                                       // with other segments
+                                                       System.Diagnostics.Debug.Assert(docStoreSegment != null);
+                                                       System.Diagnostics.Debug.Assert(docStoreSegment.Equals(segment));
+                                                       docStoreOffset = - 1;
+                                                       docStoreIsCompoundFile = false;
+                                                       docStoreSegment = null;
+                                               }
+                                               
+                                               // Create new SegmentInfo, but do not add to our
+                                               // segmentInfos until deletes are flushed
+                                               // successfully.
+                                               newSegment = new SegmentInfo(segment, flushedDocCount, directory, false, true, docStoreOffset, docStoreSegment, docStoreIsCompoundFile, docWriter.HasProx());
+                                               SetDiagnostics(newSegment, "flush");
+                                       }
+                                       
+                                       docWriter.PushDeletes();
+                                       
+                                       if (flushDocs)
+                                       {
+                                               segmentInfos.Add(newSegment);
+                                               Checkpoint();
+                                       }
+                                       
+                                       if (flushDocs && mergePolicy.UseCompoundFile(segmentInfos, newSegment))
+                                       {
+                                               // Now build compound file
+                                               bool success = false;
+                                               try
+                                               {
+                                                       docWriter.CreateCompoundFile(segment);
+                                                       success = true;
+                                               }
+                                               finally
+                                               {
+                                                       if (!success)
+                                                       {
+                                                               if (infoStream != null)
+                                                                       Message("hit exception creating compound file for newly flushed segment " + segment);
+                                                               deleter.DeleteFile(segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
+                                                       }
+                                               }
+                                               
+                                               newSegment.SetUseCompoundFile(true);
+                                               Checkpoint();
+                                       }
+                                       
+                                       if (flushDeletes)
+                                       {
+                                               ApplyDeletes();
+                                       }
+                                       
+                                       if (flushDocs)
+                                               Checkpoint();
+                                       
+                                       DoAfterFlush();
+                                       
+                                       return flushDocs;
+                               }
+                               catch (System.OutOfMemoryException oom)
+                               {
+                                       HandleOOM(oom, "doFlush");
+                                       // never hit
+                                       return false;
+                               }
+                               finally
+                               {
+                                       docWriter.ResumeAllThreads();
+                               }
+                       }
+               }
+               
+               /// <summary>Expert:  Return the total size of all index files currently cached in memory.
+               /// Useful for managing when to flush based on RAM usage.
+               /// </summary>
+               public long RamSizeInBytes()
+               {
+                       EnsureOpen();
+                       return docWriter.GetRAMUsed();
+               }
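+               // Editorial sketch (not part of the original port): using
+               // RamSizeInBytes() to drive flushing manually, assuming a 16 MB
+               // budget and an open writer:
+               //
+               //   const long ramBudget = 16 * 1024 * 1024;
+               //   writer.AddDocument(doc);
+               //   if (writer.RamSizeInBytes() > ramBudget)
+               //       writer.Flush(true, false, true); // triggerMerge, keep doc stores shared, flush deletes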
+               
+               /// <summary>Expert:  Return the number of documents currently
+               /// buffered in RAM. 
+               /// </summary>
+               public int NumRamDocs()
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               return docWriter.GetNumDocsInRAM();
+                       }
+               }
+               
+               private int EnsureContiguousMerge(MergePolicy.OneMerge merge)
+               {
+                       
+                       int first = segmentInfos.IndexOf(merge.segments.Info(0));
+                       if (first == - 1)
+                               throw new MergePolicy.MergeException("could not find segment " + merge.segments.Info(0).name + " in current index " + SegString(), directory);
+                       
+                       int numSegments = segmentInfos.Count;
+                       
+                       int numSegmentsToMerge = merge.segments.Count;
+                       for (int i = 0; i < numSegmentsToMerge; i++)
+                       {
+                               SegmentInfo info = merge.segments.Info(i);
+                               
+                               if (first + i >= numSegments || !segmentInfos.Info(first + i).Equals(info))
+                               {
+                                       if (segmentInfos.IndexOf(info) == - 1)
+                                               throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.name + ") that is not in the current index " + SegString(), directory);
+                                       else
+                                               throw new MergePolicy.MergeException("MergePolicy selected non-contiguous segments to merge (" + merge.SegString(directory) + " vs " + SegString() + "), which IndexWriter (currently) cannot handle", directory);
+                               }
+                       }
+                       
+                       return first;
+               }
+               
+               /// <summary>Carefully merges deletes for the segments we just
+               /// merged.  This is tricky because, although merging will
+               /// clear all deletes (compacts the documents), new
+               /// deletes may have been flushed to the segments since
+               /// the merge was started.  This method "carries over"
+               /// such new deletes onto the newly merged segment, and
+               /// saves the resulting deletes file (incrementing the
+               /// delete generation for merge.info).  If no deletes were
+               /// flushed, no new deletes file is saved. 
+               /// </summary>
+               private void  CommitMergedDeletes(MergePolicy.OneMerge merge, SegmentReader mergeReader)
+               {
+                       lock (this)
+                       {
+                               
+                               System.Diagnostics.Debug.Assert(TestPoint("startCommitMergeDeletes"));
+                               
+                               SegmentInfos sourceSegments = merge.segments;
+                               
+                               if (infoStream != null)
+                                       Message("commitMergeDeletes " + merge.SegString(directory));
+                               
+                               // Carefully merge deletes that occurred after we
+                               // started merging:
+                               int docUpto = 0;
+                               int delCount = 0;
+                               
+                               for (int i = 0; i < sourceSegments.Count; i++)
+                               {
+                                       SegmentInfo info = sourceSegments.Info(i);
+                                       int docCount = info.docCount;
+                                       SegmentReader previousReader = merge.readersClone[i];
+                                       SegmentReader currentReader = merge.readers[i];
+                                       if (previousReader.HasDeletions())
+                                       {
+                                               
+                                               // There were deletes on this segment when the merge
+                                               // started.  The merge has collapsed away those
+                                               // deletes, but, if new deletes were flushed since
+                                               // the merge started, we must now carefully keep any
+                                               // newly flushed deletes but mapping them to the new
+                                               // docIDs.
+                                               
+                                               if (currentReader.NumDeletedDocs() > previousReader.NumDeletedDocs())
+                                               {
+                                                       // This means this segment has had new deletes
+                                                       // committed since we started the merge, so we
+                                                       // must merge them:
+                                                       for (int j = 0; j < docCount; j++)
+                                                       {
+                                                               if (previousReader.IsDeleted(j))
+                                                               {
+                                                                       System.Diagnostics.Debug.Assert(currentReader.IsDeleted(j));
+                                                               }
+                                                               else
+                                                               {
+                                                                       if (currentReader.IsDeleted(j))
+                                                                       {
+                                                                               mergeReader.DoDelete(docUpto);
+                                                                               delCount++;
+                                                                       }
+                                                                       docUpto++;
+                                                               }
+                                                       }
+                                               }
+                                               else
+                                               {
+                                                       docUpto += docCount - previousReader.NumDeletedDocs();
+                                               }
+                                       }
+                                       else if (currentReader.HasDeletions())
+                                       {
+                                               // This segment had no deletes before but now it
+                                               // does:
+                                               for (int j = 0; j < docCount; j++)
+                                               {
+                                                       if (currentReader.IsDeleted(j))
+                                                       {
+                                                               mergeReader.DoDelete(docUpto);
+                                                               delCount++;
+                                                       }
+                                                       docUpto++;
+                                               }
+                                       }
+                                       // No deletes before or after
+                                       else
+                                               docUpto += info.docCount;
+                               }
+                               
+                               System.Diagnostics.Debug.Assert(mergeReader.NumDeletedDocs() == delCount);
+                               
+                               mergeReader.hasChanges = delCount > 0;
+                       }
+               }
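+               // Editorial worked example (not part of the original port): suppose a
+               // source segment holds docs 0..4 and doc 1 was already deleted when
+               // the merge started.  The merge compacts doc 1 away, so the survivors
+               // 0, 2, 3, 4 map to merged docIDs docUpto+0 .. docUpto+3.  If doc 3
+               // is then deleted while the merge runs (visible only in
+               // currentReader), the loop above replays it as
+               // mergeReader.DoDelete(docUpto + 2), because doc 3 is the third
+               // surviving document in that segment.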
+               
+               /* FIXME if we want to support non-contiguous segment merges */
+               private bool CommitMerge(MergePolicy.OneMerge merge, SegmentMerger merger, int mergedDocCount, SegmentReader mergedReader)
+               {
+                       lock (this)
+                       {
+                               
+                               System.Diagnostics.Debug.Assert(TestPoint("startCommitMerge"));
+                               
+                               if (hitOOM)
+                               {
+                                       throw new System.SystemException("this writer hit an OutOfMemoryError; cannot complete merge");
+                               }
+                               
+                               if (infoStream != null)
+                                       Message("commitMerge: " + merge.SegString(directory) + " index=" + SegString());
+                               
+                               System.Diagnostics.Debug.Assert(merge.registerDone);
+                               
+                               // If merge was explicitly aborted, or, if rollback() or
+                               // rollbackTransaction() had been called since our merge
+                               // started (which results in an unqualified
+                               // deleter.refresh() call that will remove any index
+                               // file that current segments does not reference), we
+                               // abort this merge
+                               if (merge.IsAborted())
+                               {
+                                       if (infoStream != null)
+                                               Message("commitMerge: skipping merge " + merge.SegString(directory) + ": it was aborted");
+                                       
+                                       return false;
+                               }
+                               
+                               int start = EnsureContiguousMerge(merge);
+                               
+                               CommitMergedDeletes(merge, mergedReader);
+                               docWriter.RemapDeletes(segmentInfos, merger.GetDocMaps(), merger.GetDelCounts(), merge, mergedDocCount);
+
+                               // If the doc store we are using has been closed and
+                               // is now in compound format (but wasn't when we
+                               // started), then we will switch to the compound
+                               // format as well:
+                               SetMergeDocStoreIsCompoundFile(merge);
+                               
+                               merge.info.SetHasProx(merger.HasProx());
+                               
+                               ((System.Collections.IList) ((System.Collections.ArrayList) segmentInfos).GetRange(start, merge.segments.Count)).Clear();
+                               System.Diagnostics.Debug.Assert(!segmentInfos.Contains(merge.info));
+                               segmentInfos.Insert(start, merge.info);
+
+                CloseMergeReaders(merge, false);
+                               
+                               // Must note the change to segmentInfos so any commits
+                               // in-flight don't lose it:
+                               Checkpoint();
+                               
+                               // If the merged segments had pending changes, clear
+                               // them so that they don't bother writing them to
+                               // disk, updating SegmentInfo, etc.:
+                               readerPool.Clear(merge.segments);
+
+                if (merge.optimize)
+                {
+                    // cascade the optimize:
+                    segmentsToOptimize[merge.info] = merge.info;
+                }
+                               return true;
+                       }
+               }
+               
+               private void  HandleMergeException(System.Exception t, MergePolicy.OneMerge merge)
+               {
+                       
+                       if (infoStream != null)
+                       {
+                               Message("handleMergeException: merge=" + merge.SegString(directory) + " exc=" + t);
+                       }
+                       
+                       // Set the exception on the merge, so if
+                       // optimize() is waiting on us it sees the root
+                       // cause exception:
+                       merge.SetException(t);
+                       AddMergeException(merge);
+                       
+                       if (t is MergePolicy.MergeAbortedException)
+                       {
+                               // We can ignore this exception (it happens when
+                               // close(false) or rollback is called), unless the
+                               // merge involves segments from external directories,
+                               // in which case we must throw it so, for example, the
+                               // rollbackTransaction code in addIndexes* is
+                               // executed.
+                               if (merge.isExternal)
+                                       throw (MergePolicy.MergeAbortedException) t;
+                       }
+                       else if (t is System.IO.IOException)
+                               throw (System.IO.IOException) t;
+                       else if (t is System.SystemException)
+                               throw (System.SystemException) t;
+                       else if (t is System.ApplicationException)
+                               throw (System.ApplicationException) t;
+                       // Should not get here
+                       else
+                               throw new System.SystemException(null, t);
+               }
+               
+               public void Merge_ForNUnit(MergePolicy.OneMerge merge)
+               {
+                       Merge(merge);
+               }
+               
+               /// <summary> Merges the indicated segments, replacing them in the stack with a
+               /// single segment.
+               /// </summary>
+               internal void  Merge(MergePolicy.OneMerge merge)
+               {
+                       
+                       bool success = false;
+                       
+                       try
+                       {
+                               try
+                               {
+                                       try
+                                       {
+                                               MergeInit(merge);
+                                               
+                                               if (infoStream != null)
+                                               {
+                                                       Message("now merge\n  merge=" + merge.SegString(directory) + "\n  merge=" + merge + "\n  index=" + SegString());
+                                               }
+                                               
+                                               MergeMiddle(merge);
+                                               MergeSuccess(merge);
+                                               success = true;
+                                       }
+                                       catch (System.Exception t)
+                                       {
+                                               HandleMergeException(t, merge);
+                                       }
+                               }
+                               finally
+                               {
+                                       lock (this)
+                                       {
+                                               MergeFinish(merge);
+                                               
+                                               if (!success)
+                                               {
+                                                       if (infoStream != null)
+                                                               Message("hit exception during merge");
+                                                       if (merge.info != null && !segmentInfos.Contains(merge.info))
+                                                               deleter.Refresh(merge.info.name);
+                                               }
+                                               
+                                               // This merge (and, generally, any change to the
+                                               // segments) may now enable new merges, so we call
+                                               // merge policy & update pending merges.
+                                               if (success && !merge.IsAborted() && !closed && !closing)
+                                                       UpdatePendingMerges(merge.maxNumSegmentsOptimize, merge.optimize);
+                                       }
+                               }
+                       }
+                       catch (System.OutOfMemoryException oom)
+                       {
+                               HandleOOM(oom, "merge");
+                       }
+               }
+               
+               /// <summary>Hook that's called when the specified merge is complete. </summary>
+               internal virtual void  MergeSuccess(MergePolicy.OneMerge merge)
+               {
+               }
+               
+               /// <summary>Checks whether this merge involves any segments
+               /// already participating in a merge.  If not, this merge
+               /// is "registered", meaning we record that its segments
+               /// are now participating in a merge, and true is
+               /// returned.  Otherwise (the merge conflicts), false is
+               /// returned.
+               /// </summary>
+               internal bool RegisterMerge(MergePolicy.OneMerge merge)
+               {
+                       lock (this)
+                       {
+                               
+                               if (merge.registerDone)
+                                       return true;
+                               
+                               if (stopMerges)
+                               {
+                                       merge.Abort();
+                                       throw new MergePolicy.MergeAbortedException("merge is aborted: " + merge.SegString(directory));
+                               }
+                               
+                               int count = merge.segments.Count;
+                               bool isExternal = false;
+                               for (int i = 0; i < count; i++)
+                               {
+                                       SegmentInfo info = merge.segments.Info(i);
+                    if (mergingSegments.Contains(info))
+                    {
+                        return false;
+                    }
+                    if (segmentInfos.IndexOf(info) == -1)
+                    {
+                        return false;
+                    }
+                    if (info.dir != directory)
+                    {
+                        isExternal = true;
+                    }
+                    if (segmentsToOptimize.Contains(info))
+                    {
+                        merge.optimize = true;
+                        merge.maxNumSegmentsOptimize = optimizeMaxNumSegments;
+                    }
+                               }
+                               
+                               EnsureContiguousMerge(merge);
+                               
+                               pendingMerges.AddLast(merge);
+                               
+                               if (infoStream != null)
+                                       Message("add merge to pendingMerges: " + merge.SegString(directory) + " [total " + pendingMerges.Count + " pending]");
+                               
+                               merge.mergeGen = mergeGen;
+                               merge.isExternal = isExternal;
+                               
+                               // OK, it does not conflict; now record that this merge
+                               // is running (while synchronized) to avoid the race
+                               // condition where two conflicting merges from different
+                               // threads both start
+                for (int i = 0; i < count; i++)
+                {
+                    SegmentInfo si = merge.segments.Info(i);
+                    mergingSegments[si] = si;
+                }
+                               
+                               // Merge is now registered
+                               merge.registerDone = true;
+                               return true;
+                       }
+               }
+               
+               /// <summary>Does initial setup for a merge, which is fast but holds
+               /// the synchronized lock on the IndexWriter instance.
+               /// </summary>
+               internal void  MergeInit(MergePolicy.OneMerge merge)
+               {
+                       lock (this)
+                       {
+                               bool success = false;
+                               try
+                               {
+                                       _MergeInit(merge);
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       if (!success)
+                                       {
+                                               MergeFinish(merge);
+                                       }
+                               }
+                       }
+               }
+               
+               private void  _MergeInit(MergePolicy.OneMerge merge)
+               {
+                       lock (this)
+                       {
+                               
+                               System.Diagnostics.Debug.Assert(TestPoint("startMergeInit"));
+                               
+                               System.Diagnostics.Debug.Assert(merge.registerDone);
+                               System.Diagnostics.Debug.Assert(!merge.optimize || merge.maxNumSegmentsOptimize > 0);
+                               
+                               if (hitOOM)
+                               {
+                                       throw new System.SystemException("this writer hit an OutOfMemoryError; cannot merge");
+                               }
+                               
+                               // mergeInit was already done
+                               if (merge.info != null)
+                                       return;
+                               
+                               if (merge.IsAborted())
+                                       return ;
+                               
+                               bool changed = ApplyDeletes();
+                               
+                               // If autoCommit == true then all deletes should have
+                               // been flushed when we flushed the last segment
+                               System.Diagnostics.Debug.Assert(!changed || !autoCommit);
+                               
+                               SegmentInfos sourceSegments = merge.segments;
+                               int end = sourceSegments.Count;
+                               
+                               // Check whether this merge will allow us to skip
+                               // merging the doc stores (stored field & vectors).
+                               // This is a very substantial optimization (saves tons
+                               // of IO) that can only be applied with
+                               // autoCommit=false.
+                               
+                               Directory lastDir = directory;
+                               System.String lastDocStoreSegment = null;
+                               int next = - 1;
+                               
+                               bool mergeDocStores = false;
+                               bool doFlushDocStore = false;
+                               System.String currentDocStoreSegment = docWriter.GetDocStoreSegment();
+                               
+                               // Test each segment to be merged: check if we need to
+                               // flush/merge doc stores
+                               for (int i = 0; i < end; i++)
+                               {
+                                       SegmentInfo si = sourceSegments.Info(i);
+                                       
+                                       // If it has deletions we must merge the doc stores
+                                       if (si.HasDeletions())
+                                               mergeDocStores = true;
+                                       
+                                       // If it has its own (private) doc stores we must
+                                       // merge the doc stores
+                                       if (- 1 == si.GetDocStoreOffset())
+                                               mergeDocStores = true;
+                                       
+                                       // If it has a different doc store segment than
+                                       // previous segments, we must merge the doc stores
+                                       System.String docStoreSegment = si.GetDocStoreSegment();
+                                       if (docStoreSegment == null)
+                                               mergeDocStores = true;
+                                       else if (lastDocStoreSegment == null)
+                                               lastDocStoreSegment = docStoreSegment;
+                                       else if (!lastDocStoreSegment.Equals(docStoreSegment))
+                                               mergeDocStores = true;
+                                       
+                                       // Segments' docStoreOffsets must be in-order,
+                                       // contiguous.  For the default merge policy this
+                                       // will always be the case, but for an arbitrary
+                                       // merge policy it may not be
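+                                       // (e.g. a segment at offset 0 with docCount 100 is
+                                       // contiguous with a successor at offset 100; a
+                                       // successor at offset 150 would force mergeDocStores)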
+                                       if (- 1 == next)
+                                               next = si.GetDocStoreOffset() + si.docCount;
+                                       else if (next != si.GetDocStoreOffset())
+                                               mergeDocStores = true;
+                                       else
+                                               next = si.GetDocStoreOffset() + si.docCount;
+                                       
+                                       // If the segment comes from a different directory
+                                       // we must merge
+                                       if (lastDir != si.dir)
+                                               mergeDocStores = true;
+                                       
+                                       // If the segment is referencing the current "live"
+                                       // doc store outputs then we must merge
+                                       if (si.GetDocStoreOffset() != - 1 && currentDocStoreSegment != null && si.GetDocStoreSegment().Equals(currentDocStoreSegment))
+                                       {
+                                               doFlushDocStore = true;
+                                       }
+                               }
+
+                // if a mergedSegmentWarmer is installed, we must merge
+                // the doc stores because we will open a full
+                // SegmentReader on the merged segment:
+                if (!mergeDocStores && mergedSegmentWarmer != null && currentDocStoreSegment != null && lastDocStoreSegment != null && lastDocStoreSegment.Equals(currentDocStoreSegment))
+                {
+                    mergeDocStores = true;
+                }
+
+                               int docStoreOffset;
+                               System.String docStoreSegment2;
+                               bool docStoreIsCompoundFile;
+                               
+                               if (mergeDocStores)
+                               {
+                                       docStoreOffset = - 1;
+                                       docStoreSegment2 = null;
+                                       docStoreIsCompoundFile = false;
+                               }
+                               else
+                               {
+                                       SegmentInfo si = sourceSegments.Info(0);
+                                       docStoreOffset = si.GetDocStoreOffset();
+                                       docStoreSegment2 = si.GetDocStoreSegment();
+                                       docStoreIsCompoundFile = si.GetDocStoreIsCompoundFile();
+                               }
+                               
+                               if (mergeDocStores && doFlushDocStore)
+                               {
+                                       // SegmentMerger intends to merge the doc stores
+                                       // (stored fields, vectors), and at least one of the
+                                       // segments to be merged refers to the currently
+                                       // live doc stores.
+                                       
+                                       // TODO: if we know we are about to merge away these
+                                       // newly flushed doc store files then we should not
+                                       // make a compound file out of them...
+                                       if (infoStream != null)
+                                               Message("now flush at merge");
+                                       DoFlush(true, false);
+                               }
+                               
+                               merge.mergeDocStores = mergeDocStores;
+                               
+                               // Bind a new segment name here so even with
+                               // ConcurrentMergePolicy we keep deterministic segment
+                               // names.
+                               merge.info = new SegmentInfo(NewSegmentName(), 0, directory, false, true, docStoreOffset, docStoreSegment2, docStoreIsCompoundFile, false);
+
+
+                System.Collections.Generic.IDictionary<string, string> details = new System.Collections.Generic.Dictionary<string, string>();
+                               details["optimize"] = merge.optimize + "";
+                               details["mergeFactor"] = end + "";
+                               details["mergeDocStores"] = mergeDocStores + "";
+                               SetDiagnostics(merge.info, "merge", details);
+                               
+                               // Also enroll the merged segment into mergingSegments;
+                               // this prevents it from getting selected for a merge
+                               // after our merge is done but while we are building the
+                               // CFS:
+                mergingSegments[merge.info] = merge.info;
+                       }
+               }
+               
+               private void  SetDiagnostics(SegmentInfo info, System.String source)
+               {
+                       SetDiagnostics(info, source, null);
+               }
+
+        private void SetDiagnostics(SegmentInfo info, System.String source, System.Collections.Generic.IDictionary<string, string> details)
+               {
+            System.Collections.Generic.IDictionary<string, string> diagnostics = new System.Collections.Generic.Dictionary<string,string>();
+                       diagnostics["source"] = source;
+                       diagnostics["lucene.version"] = Constants.LUCENE_VERSION;
+                       diagnostics["os"] = Constants.OS_NAME + "";
+                       diagnostics["os.arch"] = Constants.OS_ARCH + "";
+                       diagnostics["os.version"] = Constants.OS_VERSION + "";
+                       diagnostics["java.version"] = Constants.JAVA_VERSION + "";
+                       diagnostics["java.vendor"] = Constants.JAVA_VENDOR + "";
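+                       // Note: the "java.*" keys are carried over from the
+                       // Java original, presumably so diagnostics stay
+                       // comparable with indexes written by Java Lucene.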
+                       if (details != null)
+                       {
+                foreach (string key in details.Keys)
+                {
+                    diagnostics[key] = details[key];
+                }
+                       }
+                       info.SetDiagnostics(diagnostics);
+               }
+               
+               /// <summary>This is called after merging a segment and before
+               /// building its CFS.  Return true if the files should be
+               /// sync'd.  If you return false, then the source segment
+               /// files that were merged cannot be deleted until the CFS
+               /// file is built &amp; sync'd.  So, returning false consumes
+               /// more transient disk space, but avoids the cost of
+               /// syncing files that will shortly be deleted anyway.
+               /// </summary>
+               /// <deprecated> -- this will be removed in 3.0 when
+               /// autoCommit is hardwired to false 
+               /// </deprecated>
+        [Obsolete("-- this will be removed in 3.0 when autoCommit is hardwired to false ")]
+               private bool DoCommitBeforeMergeCFS(MergePolicy.OneMerge merge)
+               {
+                       lock (this)
+                       {
+                               long freeableBytes = 0;
+                               int size = merge.segments.Count;
+                               for (int i = 0; i < size; i++)
+                               {
+                                       SegmentInfo info = merge.segments.Info(i);
+                                       // It's only important to sync if the most recent
+                                       // commit actually references this segment, because if
+                                       // it doesn't, even without syncing we will free up
+                                       // the disk space:
+                    if (rollbackSegments.ContainsKey(info))
+                                       {
+                                               int loc = (System.Int32) rollbackSegments[info];
+                                               SegmentInfo oldInfo = rollbackSegmentInfos.Info(loc);
+                                               if (oldInfo.GetUseCompoundFile() != info.GetUseCompoundFile())
+                                                       freeableBytes += info.SizeInBytes();
+                                       }
+                               }
+                               // If we would free up more than 1/3rd of the index by
+                               // committing now, then do so:
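+                               // (e.g. freeableBytes = 40 MB against a 100 MB index:
+                               // 3 * 40 > 100, so we commit now)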
+                               long totalBytes = 0;
+                               int numSegments = segmentInfos.Count;
+                               for (int i = 0; i < numSegments; i++)
+                                       totalBytes += segmentInfos.Info(i).SizeInBytes();
+                               return 3 * freeableBytes > totalBytes;
+                       }
+               }
+               
+               /// <summary>Does finishing work for a merge, which is fast but
+               /// holds the synchronized lock on the IndexWriter instance.
+               /// </summary>
+               internal void  MergeFinish(MergePolicy.OneMerge merge)
+               {
+                       lock (this)
+                       {
+                               
+                               // Optimize, addIndexes or finishMerges may be waiting
+                               // on merges to finish.
+                               System.Threading.Monitor.PulseAll(this);
+                               
+                               // It's possible we are called twice, e.g. if there was an
+                               // exception inside mergeInit
+                               if (merge.registerDone)
+                               {
+                                       SegmentInfos sourceSegments = merge.segments;
+                                       int end = sourceSegments.Count;
+                                       for (int i = 0; i < end; i++)
+                                               mergingSegments.Remove(sourceSegments.Info(i));
+                                       if (merge.info != null)
+                                               mergingSegments.Remove(merge.info);
+                                       merge.registerDone = false;
+                               }
+                               
+                               runningMerges.Remove(merge);
+                       }
+               }
+               
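+        /// <summary>If the merged segment shares its doc store with a
+        /// segment that stores that doc store as a compound file, mark
+        /// the merged segment's doc store as compound too.
+        /// </summary>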
+        private void SetMergeDocStoreIsCompoundFile(MergePolicy.OneMerge merge)
+        {
+            lock (this)
+            {
+                string mergeDocStoreSegment = merge.info.GetDocStoreSegment();
+                if (mergeDocStoreSegment != null && !merge.info.GetDocStoreIsCompoundFile())
+                {
+                    int size = segmentInfos.Count;
+                    for (int i = 0; i < size; i++)
+                    {
+                        SegmentInfo info = segmentInfos.Info(i);
+                        string docStoreSegment = info.GetDocStoreSegment();
+                        if (docStoreSegment != null &&
+                            docStoreSegment.Equals(mergeDocStoreSegment) &&
+                            info.GetDocStoreIsCompoundFile())
+                        {
+                            merge.info.SetDocStoreIsCompoundFile(true);
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+
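+        /// <summary>Releases the merge's readers and reader clones.  With
+        /// suppressExceptions, failures are swallowed so that the original
+        /// cause of the merge failure is the exception propagated.
+        /// </summary>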
+        private void CloseMergeReaders(MergePolicy.OneMerge merge, bool suppressExceptions)
+        {
+            lock (this)
+            {
+                int numSegments = merge.segments.Count;
+                if (suppressExceptions)
+                {
+                    // Suppress any new exceptions so we throw the
+                    // original cause
+                    for (int i = 0; i < numSegments; i++)
+                    {
+                        if (merge.readers[i] != null)
+                        {
+                            try
+                            {
+                                readerPool.Release(merge.readers[i], false);
+                            }
+                            catch (Exception)
+                            {
+                                // Ignored: suppress new exceptions so the
+                                // original cause is the one rethrown
+                            }
+                            merge.readers[i] = null;
+                        }
+
+                        if (merge.readersClone[i] != null)
+                        {
+                            try
+                            {
+                                merge.readersClone[i].Close();
+                            }
+                            catch (Exception)
+                            {
+                                // Ignored for the same reason as above
+                            }
+                            // This was a private clone and we had the
+                            // only reference
+                            System.Diagnostics.Debug.Assert(merge.readersClone[i].GetRefCount() == 0, "refCount should be 0 but is " + merge.readersClone[i].GetRefCount());
+                            merge.readersClone[i] = null;
+                        }
+                    }
+                }
+                else
+                {
+                    for (int i = 0; i < numSegments; i++)
+                    {
+                        if (merge.readers[i] != null)
+                        {
+                            readerPool.Release(merge.readers[i], true);
+                            merge.readers[i] = null;
+                        }
+
+                        if (merge.readersClone[i] != null)
+                        {
+                            merge.readersClone[i].Close();
+                            // This was a private clone and we had the only reference
+                            System.Diagnostics.Debug.Assert(merge.readersClone[i].GetRefCount() == 0);
+                            merge.readersClone[i] = null;
+                        }
+                    }
+                }
+            }
+        }
+
+
+               /// <summary>Does the actual (time-consuming) work of the merge,
+               /// but without holding the synchronized lock on the
+               /// IndexWriter instance.
+               /// </summary>
+               private int MergeMiddle(MergePolicy.OneMerge merge)
+               {
+                       
+                       merge.CheckAborted(directory);
+                       
+                       System.String mergedName = merge.info.name;
+                       
+                       SegmentMerger merger = null;
+                       
+                       int mergedDocCount = 0;
+                       
+                       SegmentInfos sourceSegments = merge.segments;
+                       int numSegments = sourceSegments.Count;
+                       
+                       if (infoStream != null)
+                               Message("merging " + merge.SegString(directory));
+                       
+                       merger = new SegmentMerger(this, mergedName, merge);
+                       
+                       merge.readers = new SegmentReader[numSegments];
+                       merge.readersClone = new SegmentReader[numSegments];
+                       
+                       bool mergeDocStores = false;
+
+            String currentDocStoreSegment;
+            lock(this) {
+                currentDocStoreSegment = docWriter.GetDocStoreSegment();
+            }
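+            // Tracks whether any segment being merged references the
+            // still-open doc store; if so we must flush it before
+            // opening doc stores below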
+            bool currentDSSMerged = false;
+
+                       // This is try/finally to make sure merger's readers are
+                       // closed:
+                       bool success = false;
+            try
+            {
+                int totDocCount = 0;
+
+                for (int i = 0; i < numSegments; i++)
+                {
+
+                    SegmentInfo info = sourceSegments.Info(i);
+
+                    // Hold onto the "live" reader; we will use this to
+                    // commit merged deletes
+                    SegmentReader reader = merge.readers[i] = readerPool.Get(info, merge.mergeDocStores, MERGE_READ_BUFFER_SIZE, -1);
+
+                    // We clone the segment readers because other
+                    // deletes may come in while we're merging so we
+                    // need readers that will not change
+                    SegmentReader clone = merge.readersClone[i] = (SegmentReader)reader.Clone(true);
+                    merger.Add(clone);
+
+                    if (clone.HasDeletions())
+                    {
+                        mergeDocStores = true;
+                    }
+
+                    if (info.GetDocStoreOffset() != -1 && currentDocStoreSegment != null)
+                    {
+                        currentDSSMerged |= currentDocStoreSegment.Equals(info.GetDocStoreSegment());
+                    }
+
+                    totDocCount += clone.NumDocs();
+                }
+
+                if (infoStream != null)
+                {
+                    Message("merge: total " + totDocCount + " docs");
+                }
+
+                merge.CheckAborted(directory);
+
+                // If deletions have arrived and it has now become
+                // necessary to merge doc stores, go and open them:
+                if (mergeDocStores && !merge.mergeDocStores)
+                {
+                    merge.mergeDocStores = true;
+                    lock (this)
+                    {
+                        if (currentDSSMerged)
+                        {
+                            if (infoStream != null)
+                            {
+                                Message("now flush at mergeMiddle");
+                            }
+                            DoFlush(true, false);
+                        }
+                    }
+
+                    for (int i = 0; i < numSegments; i++)
+                    {
+                        merge.readersClone[i].OpenDocStores();
+                    }
+
+                    // Clear the doc store settings (DSS): the merged
+                    // segment now has its own private doc stores
+                    merge.info.SetDocStore(-1, null, false);
+
+                }
+
+                // This is where all the work happens:
+                mergedDocCount = merge.info.docCount = merger.Merge(merge.mergeDocStores);
+
+                System.Diagnostics.Debug.Assert(mergedDocCount == totDocCount);
+
+                if (merge.useCompoundFile)
+                {
+
+                    success = false;
+                    string compoundFileName = IndexFileNames.SegmentFileName(mergedName, IndexFileNames.COMPOUND_FILE_EXTENSION);
+
+                    try
+                    {
+                        if (infoStream != null)
+                        {
+                            Message("create compound file " + compoundFileName);
+                        }
+                        merger.CreateCompoundFile(compoundFileName);
+                        success = true;
+                    }
+                    catch (System.IO.IOException ioe)
+                    {
+                        lock (this)
+                        {
+                            if (merge.IsAborted())
+                            {
+                                // This can happen if rollback or close(false)
+                                // is called -- fall through to logic below to
+                                // remove the partially created CFS:
+                            }
+                            else
+                            {
+                                HandleMergeException(ioe, merge);
+                            }
+                        }
+                    }
+                    catch (Exception t)
+                    {
+                        HandleMergeException(t, merge);
+                    }
+                    finally
+                    {
+                        if (!success)
+                        {
+                            if (infoStream != null)
+                            {
+                                Message("hit exception creating compound file during merge");
+                            }
+
+                            lock (this)
+                            {
+                                deleter.DeleteFile(compoundFileName);
+                                deleter.DeleteNewFiles(merger.GetMergedFiles());
+                            }
+                        }
+                    }
+
+                    success = false;
+
+                    lock (this)
+                    {
+
+                        // Delete new non-CFS files directly: they were never
+                        // registered with the IndexFileDeleter (IFD)
+                        deleter.DeleteNewFiles(merger.GetMergedFiles());
+
+                        if (merge.IsAborted())
+                        {
+                            if (infoStream != null)
+                            {
+                                Message("abort merge after building CFS");
+                            }
+                            deleter.DeleteFile(compoundFileName);
+                            return 0;
+                        }
+                    }
+
+                    merge.info.SetUseCompoundFile(true);
+                }
+
+                int termsIndexDivisor;
+                bool loadDocStores;
+
+                // If the merged segment warmer was not installed when
+                // this merge started, we did not force the doc stores
+                // to close, so we cannot warm the reader now
+                bool canWarm = merge.info.GetDocStoreSegment() == null || currentDocStoreSegment == null || !merge.info.GetDocStoreSegment().Equals(currentDocStoreSegment);
+
+                if (poolReaders && mergedSegmentWarmer != null && canWarm)
+                {
+                    // Load terms index & doc stores so the segment
+                    // warmer can run searches, load documents/term
+                    // vectors
+                    termsIndexDivisor = readerTermsIndexDivisor;
+                    loadDocStores = true;
+                }
+                else
+                {
+                    termsIndexDivisor = -1;
+                    loadDocStores = false;
+                }
+
+                // TODO: in the non-realtime case, we may want to only
+                // keep deletes (it's costly to open entire reader
+                // when we just need deletes)
+
+                SegmentReader mergedReader = readerPool.Get(merge.info, loadDocStores, BufferedIndexInput.BUFFER_SIZE, termsIndexDivisor);
+                try
+                {
+                    if (poolReaders && mergedSegmentWarmer != null)
+                    {
+                        mergedSegmentWarmer.Warm(mergedReader);
+                    }
+                    if (!CommitMerge(merge, merger, mergedDocCount, mergedReader))
+                    {
+                        // commitMerge will return false if this merge was aborted
+                        return 0;
+                    }
+                }
+                finally
+                {
+                    lock (this)
+                    {
+                        readerPool.Release(mergedReader);
+                    }
+                }
+
+                success = true;
+            }
+            finally
+            {
+                // Readers are already closed in commitMerge if we
+                // didn't hit an exception:
+                if (!success)
+                {
+                    CloseMergeReaders(merge, true);
+                }
+            }
+
+            merge.mergeDone = true;
+
+            lock (mergeScheduler)
+            {
+                System.Threading.Monitor.PulseAll(mergeScheduler); 
+            }
+
+                       // Force a sync after committing the merge.  Once this
+                       // sync completes then all index files referenced by the
+                       // current segmentInfos are on stable storage so if the
+                       // OS/machine crashes, or power cord is yanked, the
+                       // index will be intact.  Note that this is just one
+                       // (somewhat arbitrary) policy; we could try other
+                       // policies like only sync if it's been > X minutes or
+                       // more than Y bytes have been written, etc.
+                       if (autoCommit)
+                       {
+                               long size;
+                               lock (this)
+                               {
+                                       size = merge.info.SizeInBytes();
+                               }
+                               Commit(size);
+                       }
+                       
+                       return mergedDocCount;
+               }
+               
+               internal virtual void  AddMergeException(MergePolicy.OneMerge merge)
+               {
+                       lock (this)
+                       {
+                               System.Diagnostics.Debug.Assert(merge.GetException() != null);
+                               if (!mergeExceptions.Contains(merge) && mergeGen == merge.mergeGen)
+                                       mergeExceptions.Add(merge);
+                       }
+               }
+               
+               // Apply buffered deletes to all segments.
+               private bool ApplyDeletes()
+               {
+                       lock (this)
+                       {
+                               System.Diagnostics.Debug.Assert(TestPoint("startApplyDeletes"));
+                flushDeletesCount++;
+                               
+                               bool success = false;
+                               bool changed;
+                               try
+                               {
+                                       changed = docWriter.ApplyDeletes(segmentInfos);
+                                       success = true;
+                               }
+                               finally
+                               {
+                    if (!success && infoStream != null)
+                    {
+                        Message("hit exception flushing deletes");
+                    }
+                               }
+                               
+                               if (changed)
+                                       Checkpoint();
+                               return changed;
+                       }
+               }
+               
+               // For test purposes.
+               public /*internal*/ int GetBufferedDeleteTermsSize()
+               {
+                       lock (this)
+                       {
+                               return docWriter.GetBufferedDeleteTerms().Count;
+                       }
+               }
+               
+               // For test purposes.
+               public /*internal*/ int GetNumBufferedDeleteTerms()
+               {
+                       lock (this)
+                       {
+                               return docWriter.GetNumBufferedDeleteTerms();
+                       }
+               }
+               
+               // utility routines for tests
+               public /*internal*/ virtual SegmentInfo NewestSegment()
+               {
+            return segmentInfos.Count > 0 ? segmentInfos.Info(segmentInfos.Count - 1) : null;
+               }
+               
+               public virtual System.String SegString()
+               {
+                       lock (this)
+                       {
+                               return SegString(segmentInfos);
+                       }
+               }
+               
+               private System.String SegString(SegmentInfos infos)
+               {
+                       lock (this)
+                       {
+                               System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                               int count = infos.Count;
+                               for (int i = 0; i < count; i++)
+                               {
+                                       if (i > 0)
+                                       {
+                                               buffer.Append(' ');
+                                       }
+                                       SegmentInfo info = infos.Info(i);
+                                       buffer.Append(info.SegString(directory));
+                                       if (info.dir != directory)
+                                               buffer.Append("**");
+                               }
+                               return buffer.ToString();
+                       }
+               }
+               
+               // Files that have been sync'd already
+        private System.Collections.Generic.Dictionary<string, string> synced = new System.Collections.Generic.Dictionary<string, string>();
+               
+               // Files that are now being sync'd
+        private System.Collections.Hashtable syncing = new System.Collections.Hashtable();
+               
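+               // Sync protocol: for a given file, exactly one thread wins
+               // StartSync (moving the file into syncing) and performs the
+               // sync; other threads add the file to their pending list and
+               // block in WaitForAllSynced until FinishSync moves it into
+               // synced.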
+               private bool StartSync(System.String fileName, System.Collections.Generic.ICollection<System.String> pending)
+               {
+                       lock (synced)
+                       {
+                               if (!synced.ContainsKey(fileName))
+                               {
+                                       if (!syncing.Contains(fileName))
+                                       {
+                                               syncing[fileName] = fileName;
+                                               return true;
+                                       }
+                                       else
+                                       {
+                                               pending.Add(fileName);
+                                               return false;
+                                       }
+                               }
+                               else
+                                       return false;
+                       }
+               }
+               
+               private void  FinishSync(System.String fileName, bool success)
+               {
+                       lock (synced)
+                       {
+                               System.Diagnostics.Debug.Assert(syncing.ContainsKey(fileName));
+                               syncing.Remove(fileName);
+                               if (success)
+                    synced[fileName] = fileName;
+                               System.Threading.Monitor.PulseAll(synced);
+                       }
+               }
+               
+               /// <summary>Blocks until all files in syncing are sync'd </summary>
+               private bool WaitForAllSynced(System.Collections.Generic.ICollection<System.String> syncing)
+               {
+                       lock (synced)
+                       {
+                               System.Collections.Generic.IEnumerator<System.String> it = syncing.GetEnumerator();
+                               while (it.MoveNext())
+                               {
+                                       System.String fileName = it.Current;
+                                       while (!synced.ContainsKey(fileName))
+                                       {
+                                               // There was an error: a file that was previously
+                                               // syncing failed to appear in synced
+                                               if (!syncing.Contains(fileName))
+                                                       return false;
+                                               else
+                                                       try
+                                                       {
+                                                               System.Threading.Monitor.Wait(synced);
+                                                       }
+                                                       catch (System.Threading.ThreadInterruptedException ie)
+                                                       {
+                                                               // In 3.0 we will change this to throw
+                                                               // InterruptedException instead
+                                                               SupportClass.ThreadClass.Current().Interrupt();
+                                                               throw new System.SystemException(ie.Message, ie);
+                                                       }
+                                       }
+                               }
+                               return true;
+                       }
+               }
+               
+               /// <summary>Pauses before syncing.  On Windows, at least, it's
+               /// best (performance-wise) to pause in order to let the OS
+               /// flush writes to disk on its own, before forcing a
+               /// sync.
+               /// </summary>
+               /// <deprecated> -- this will be removed in 3.0 when
+               /// autoCommit is hardwired to false 
+               /// </deprecated>
+        [Obsolete("-- this will be removed in 3.0 when autoCommit is hardwired to false ")]
+               private void  SyncPause(long sizeInBytes)
+               {
+                       if (mergeScheduler is ConcurrentMergeScheduler && maxSyncPauseSeconds > 0)
+                       {
+                               // Rough heuristic: for every 10 MB, we pause for 1
+                               // second, up until the max
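+                               // (e.g. sizeInBytes = 50 MB gives pauseTime = 5000 ms,
+                               // subject to the maxPauseTime cap below)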
+                               long pauseTime = (long) (1000 * sizeInBytes / 10 / 1024 / 1024);
+                               long maxPauseTime = (long) (maxSyncPauseSeconds * 1000);
+                               if (pauseTime > maxPauseTime)
+                                       pauseTime = maxPauseTime;
+                               int sleepCount = (int) (pauseTime / 100);
+                               for (int i = 0; i < sleepCount; i++)
+                               {
+                                       lock (this)
+                                       {
+                                               if (stopMerges || closing)
+                                                       break;
+                                       }
+                                       try
+                                       {
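+                                               // 10,000 ticks per millisecond, so this
+                                               // sleeps 100 ms per iteration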
+                                               System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 100));
+                                       }
+                                       catch (System.Threading.ThreadInterruptedException ie)
+                                       {
+                                               // In 3.0 we will change this to throw
+                                               // InterruptedException instead
+                                               SupportClass.ThreadClass.Current().Interrupt();
+                                               throw new System.SystemException(ie.Message, ie);
+                                       }
+                               }
+                       }
+               }
+               
+               private void  DoWait()
+               {
+                       lock (this)
+                       {
+                               // NOTE: the callers of this method should in theory
+                               // be able to simply wait(), but, as a defense
+                               // against thread timing hazards where notifyAll()
+                               // fails to be called, we wait for at most 1 second
+                               // and then return so the caller can check if the wait
+                               // conditions are satisfied:
+                               try
+                               {
+                                       System.Threading.Monitor.Wait(this, TimeSpan.FromMilliseconds(1000));
+                               }
+                               catch (System.Threading.ThreadInterruptedException ie)
+                               {
+                                       // In 3.0 we will change this to throw
+                                       // InterruptedException instead
+                                       SupportClass.ThreadClass.Current().Interrupt();
+                                       throw new System.SystemException(ie.Message, ie);
+                               }
+                       }
+               }
+               
+               /// <summary>Walk through all files referenced by the current
+               /// segmentInfos and ask the Directory to sync each file,
+               /// if it wasn't already.  If that succeeds, then we
+               /// prepare a new segments_N file but do not fully commit
+               /// it. 
+               /// </summary>
+        private void StartCommit(long sizeInBytes, System.Collections.Generic.IDictionary<string, string> commitUserData)
+               {
+                       
+                       System.Diagnostics.Debug.Assert(TestPoint("startStartCommit"));
+
+            // TODO: as of LUCENE-2095, we can simplify this method,
+            // since only 1 thread can be in here at once
+                       
+                       if (hitOOM)
+                       {
+                               throw new System.SystemException("this writer hit an OutOfMemoryError; cannot commit");
+                       }
+                       
+                       try
+                       {
+                               
+                               if (infoStream != null)
+                                       Message("startCommit(): start sizeInBytes=" + sizeInBytes);
+                               
+                               if (sizeInBytes > 0)
+                                       SyncPause(sizeInBytes);
+                               
+                               SegmentInfos toSync = null;
+                               long myChangeCount;
+                               
+                               lock (this)
+                               {
+                                       
+                                       // sizeInBytes > 0 means this is an autoCommit at
+                                       // the end of a merge.  If at this point stopMerges
+                                       // is true (which means a rollback() or
+                                       // rollbackTransaction() is waiting for us to
+                                       // finish), we skip the commit to avoid deadlock
+                                       if (sizeInBytes > 0 && stopMerges)
+                                               return ;
+                                       
+                                       // Wait for any running addIndexes to complete
+                                       // first, then block any from running until we've
+                                       // copied the segmentInfos we intend to sync:
+                                       BlockAddIndexes(false);
+                                       
+                                       // On commit the segmentInfos must never
+                                       // reference a segment in another directory:
+                                       System.Diagnostics.Debug.Assert(!HasExternalSegments());
+                                       
+                                       try
+                                       {
+                                               
+                                               System.Diagnostics.Debug.Assert(lastCommitChangeCount <= changeCount);
+                        myChangeCount = changeCount;
+                                               
+                                               if (changeCount == lastCommitChangeCount)
+                                               {
+                                                       if (infoStream != null)
+                                                               Message("  skip startCommit(): no changes pending");
+                                                       return ;
+                                               }
+                                               
+                                               // First, we clone & incref the segmentInfos we intend
+                                               // to sync, then, without locking, we sync() each file
+                                               // referenced by toSync, in the background.  Multiple
+                                               // threads can be doing this at once, if say a large
+                                               // merge and a small merge finish at the same time:
+                                               
+                                               if (infoStream != null)
+                                                       Message("startCommit index=" + SegString(segmentInfos) + " changeCount=" + changeCount);
+
+                        readerPool.Commit();
+                                               
+                        // It's possible another flush (that did not close
+                        // the open doc stores) snuck in after the flush we
+                        // just did, so we remove any tail segments
+                        // referencing the open doc store from the
+                        // SegmentInfos we are about to sync (the main
+                        // SegmentInfos will keep them):
+                        toSync = (SegmentInfos) segmentInfos.Clone();
+                        string dss = docWriter.GetDocStoreSegment();
+                        if (dss != null)
+                        {
+                            while (true)
+                            {
+                                String dss2 = toSync.Info(toSync.Count - 1).GetDocStoreSegment();
+                                if (dss2 == null || !dss2.Equals(dss))
+                                {
+                                    break;
+                                }
+                                toSync.RemoveAt(toSync.Count - 1);
+                                changeCount++;
+                            }
+                        }
+                                               
+                                               if (commitUserData != null)
+                                                       toSync.SetUserData(commitUserData);
+                                               
+                                               deleter.IncRef(toSync, false);
+                                                                                               
+                                               System.Collections.Generic.IEnumerator<string> it = toSync.Files(directory, false).GetEnumerator();
+                                               while (it.MoveNext())
+                                               {
+                                                       System.String fileName = it.Current;
+                                                       System.Diagnostics.Debug.Assert(directory.FileExists(fileName), "file " + fileName + " does not exist");
+                            // If this trips it means we are missing a call to
+                            // .checkpoint somewhere, because by the time we
+                            // are called, deleter should know about every
+                            // file referenced by the current head
+                            // segmentInfos:
+                            System.Diagnostics.Debug.Assert(deleter.Exists(fileName));
+                                               }
+                                       }
+                                       finally
+                                       {
+                                               ResumeAddIndexes();
+                                       }
+                               }
+                               
+                               System.Diagnostics.Debug.Assert(TestPoint("midStartCommit"));
+                               
+                               bool setPending = false;
+                               
+                               try
+                               {
+                                       
+                                       // Loop until all files toSync references are sync'd:
+                                       while (true)
+                                       {
+                                               
+                                               System.Collections.Generic.ICollection<System.String> pending = new System.Collections.Generic.List<System.String>();
+                                               
+                                               System.Collections.Generic.IEnumerator<string> it = toSync.Files(directory, false).GetEnumerator();
+                                               while (it.MoveNext())
+                                               {
+                                                       System.String fileName = it.Current;
+                                                       if (StartSync(fileName, pending))
+                                                       {
+                                                               bool success = false;
+                                                               try
+                                                               {
+                                                                       // Because we incRef'd this commit point, above,
+                                                                       // the file had better exist:
+                                                                       System.Diagnostics.Debug.Assert(directory.FileExists(fileName), "file '" + fileName + "' does not exist dir=" + directory);
+                                                                       if (infoStream != null)
+                                                                               Message("now sync " + fileName);
+                                                                       directory.Sync(fileName);
+                                                                       success = true;
+                                                               }
+                                                               finally
+                                                               {
+                                                                       FinishSync(fileName, success);
+                                                               }
+                                                       }
+                                               }
+                                               
+                                               // All files that I require are either synced or being
+                                               // synced by other threads.  If they are being synced,
+                                               // we must at this point block until they are done.
+                                               // If this returns false, that means an error in
+                                               // another thread resulted in failing to actually
+                                               // sync one of our files, so we repeat:
+                                               if (WaitForAllSynced(pending))
+                                                       break;
+                                       }
+                                       
+                                       System.Diagnostics.Debug.Assert(TestPoint("midStartCommit2"));
+                                       
+                                       lock (this)
+                                       {
+                                               // If someone saved a newer version of segments file
+                                               // since I first started syncing my version, I can
+                                               // safely skip saving myself since I've been
+                                               // superseded:
+                                               
+                                               while (true)
+                                               {
+                                                       if (myChangeCount <= lastCommitChangeCount)
+                                                       {
+                                                               if (infoStream != null)
+                                                               {
+                                                                       Message("sync superseded by newer infos");
+                                                               }
+                                                               break;
+                                                       }
+                                                       else if (pendingCommit == null)
+                                                       {
+                                                               // My turn to commit
+                                                               
+                                                               if (segmentInfos.GetGeneration() > toSync.GetGeneration())
+                                                                       toSync.UpdateGeneration(segmentInfos);
+                                                               
+                                                               bool success = false;
+                                                               try
+                                                               {
+                                                                       
+                                                                       // Exception here means nothing is prepared
+                                                                       // (this method unwinds everything it did on
+                                                                       // an exception)
+                                                                       try
+                                                                       {
+                                                                               toSync.PrepareCommit(directory);
+                                                                       }
+                                                                       finally
+                                                                       {
+                                                                               // Have our master segmentInfos record the
+                                                                               // generations we just prepared.  We do this
+                                                                               // on error or success so we don't
+                                                                               // double-write a segments_N file.
+                                                                               segmentInfos.UpdateGeneration(toSync);
+                                                                       }
+                                                                       
+                                                                       System.Diagnostics.Debug.Assert(pendingCommit == null);
+                                                                       setPending = true;
+                                                                       pendingCommit = toSync;
+                                                                       pendingCommitChangeCount = (uint) myChangeCount;
+                                                                       success = true;
+                                                               }
+                                                               finally
+                                                               {
+                                                                       if (!success && infoStream != null)
+                                                                               Message("hit exception committing segments file");
+                                                               }
+                                                               break;
+                                                       }
+                                                       else
+                                                       {
+                                                               // Must wait for other commit to complete
+                                                               DoWait();
+                                                       }
+                                               }
+                                       }
+                                       
+                                       if (infoStream != null)
+                                               Message("done all syncs");
+                                       
+                                       System.Diagnostics.Debug.Assert(TestPoint("midStartCommitSuccess"));
+                               }
+                               finally
+                               {
+                                       lock (this)
+                                       {
+                                               if (!setPending)
+                                                       deleter.DecRef(toSync);
+                                       }
+                               }
+                       }
+                       catch (System.OutOfMemoryException oom)
+                       {
+                               HandleOOM(oom, "startCommit");
+                       }
+                       System.Diagnostics.Debug.Assert(TestPoint("finishStartCommit"));
+               }
+               
+               /// <summary> Returns <code>true</code> iff the index in the named directory is
+               /// currently locked.
+               /// </summary>
+               /// <param name="directory">the directory to check for a lock
+               /// </param>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public static bool IsLocked(Directory directory)
+               {
+                       return directory.MakeLock(WRITE_LOCK_NAME).IsLocked();
+               }
+               
+               /// <summary> Returns <code>true</code> iff the index in the named directory is
+               /// currently locked.
+               /// </summary>
+               /// <param name="directory">the directory to check for a lock
+               /// </param>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <deprecated> Use {@link #IsLocked(Directory)}
+               /// </deprecated>
+        [Obsolete("Use IsLocked(Directory)")]
+               public static bool IsLocked(System.String directory)
+               {
+                       Directory dir = FSDirectory.GetDirectory(directory);
+                       try
+                       {
+                               return IsLocked(dir);
+                       }
+                       finally
+                       {
+                               dir.Close();
+                       }
+               }
+               
+               /// <summary> Forcibly unlocks the index in the named directory.
+               /// <p/>
+               /// Caution: this should only be used by failure recovery code,
+		/// when it is known that no other process or thread is in fact
+               /// currently accessing this index.
+               /// </summary>
+               public static void  Unlock(Directory directory)
+               {
+                       directory.MakeLock(IndexWriter.WRITE_LOCK_NAME).Release();
+               }
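+		// Editorial usage sketch (not part of the upstream source): recovering
+		// from a crashed process that left a stale write lock behind.  The
+		// index path is hypothetical.
+		//
+		//   Directory dir = FSDirectory.GetDirectory("/var/data/index");
+		//   try
+		//   {
+		//       if (IndexWriter.IsLocked(dir))
+		//           IndexWriter.Unlock(dir); // safe only when no other writer or process is live
+		//   }
+		//   finally
+		//   {
+		//       dir.Close();
+		//   }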
+               
+               /// <summary> Specifies maximum field length (in number of tokens/terms) in {@link IndexWriter} constructors.
+               /// {@link #SetMaxFieldLength(int)} overrides the value set by
+               /// the constructor.
+               /// </summary>
+               public sealed class MaxFieldLength
+               {
+                       
+                       private int limit;
+                       private System.String name;
+                       
+                       /// <summary> Private type-safe-enum-pattern constructor.
+                       /// 
+                       /// </summary>
+                       /// <param name="name">instance name
+                       /// </param>
+                       /// <param name="limit">maximum field length
+                       /// </param>
+                       internal MaxFieldLength(System.String name, int limit)
+                       {
+                               this.name = name;
+                               this.limit = limit;
+                       }
+                       
+                       /// <summary> Public constructor to allow users to specify the maximum field size limit.
+                       /// 
+                       /// </summary>
+                       /// <param name="limit">The maximum field length
+                       /// </param>
+                       public MaxFieldLength(int limit):this("User-specified", limit)
+                       {
+                       }
+                       
+                       public int GetLimit()
+                       {
+                               return limit;
+                       }
+                       
+                       public override System.String ToString()
+                       {
+                               return name + ":" + limit;
+                       }
+                       
+                       /// <summary>Sets the maximum field length to {@link Integer#MAX_VALUE}. </summary>
+                       public static readonly MaxFieldLength UNLIMITED = new MaxFieldLength("UNLIMITED", System.Int32.MaxValue);
+                       
+                       /// <summary>  Sets the maximum field length to 
+                       /// {@link #DEFAULT_MAX_FIELD_LENGTH} 
+                       /// 
+                       /// </summary>
+                       public static readonly MaxFieldLength LIMITED;
+                       static MaxFieldLength()
+                       {
+                               LIMITED = new MaxFieldLength("LIMITED", Mono.Lucene.Net.Index.IndexWriter.DEFAULT_MAX_FIELD_LENGTH);
+                       }
+               }
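+		// Hedged example of using the type-safe enum above (editorial, not in
+		// the upstream source; the IndexWriter constructor overload taking a
+		// MaxFieldLength is assumed from the public API):
+		//
+		//   IndexWriter w = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.UNLIMITED);
+		//   // or cap every field at 50,000 tokens:
+		//   IndexWriter capped = new IndexWriter(dir, analyzer, new IndexWriter.MaxFieldLength(50000));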
+               
+               /// <summary>If {@link #getReader} has been called (ie, this writer
+               /// is in near real-time mode), then after a merge
+               /// completes, this class can be invoked to warm the
+               /// reader on the newly merged segment, before the merge
+               /// commits.  This is not required for near real-time
+               /// search, but will reduce search latency on opening a
+               /// new near real-time reader after a merge completes.
+               /// 
+               /// <p/><b>NOTE:</b> This API is experimental and might
+               /// change in incompatible ways in the next release.<p/>
+               /// 
+               /// <p/><b>NOTE</b>: warm is called before any deletes have
+               /// been carried over to the merged segment. 
+               /// </summary>
+               public abstract class IndexReaderWarmer
+               {
+                       public abstract void  Warm(IndexReader reader);
+               }
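+		// Editorial sketch of a warmer (the query is hypothetical): running a
+		// throwaway search against the merged segment's reader so its caches
+		// are hot before the first near real-time reader sees it.
+		//
+		//   class SimpleWarmer : IndexWriter.IndexReaderWarmer
+		//   {
+		//       public override void Warm(IndexReader reader)
+		//       {
+		//           new IndexSearcher(reader).Search(new TermQuery(new Term("id", "0")), 10);
+		//       }
+		//   }
+		//   writer.SetMergedSegmentWarmer(new SimpleWarmer());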
+               
+               private IndexReaderWarmer mergedSegmentWarmer;
+               
+               /// <summary>Set the merged segment warmer.  See {@link
+               /// IndexReaderWarmer}. 
+               /// </summary>
+               public virtual void  SetMergedSegmentWarmer(IndexReaderWarmer warmer)
+               {
+                       mergedSegmentWarmer = warmer;
+               }
+               
+               /// <summary>Returns the current merged segment warmer.  See {@link
+               /// IndexReaderWarmer}. 
+               /// </summary>
+               public virtual IndexReaderWarmer GetMergedSegmentWarmer()
+               {
+                       return mergedSegmentWarmer;
+               }
+               
+               private void  HandleOOM(System.OutOfMemoryException oom, System.String location)
+               {
+                       if (infoStream != null)
+                       {
+                               Message("hit OutOfMemoryError inside " + location);
+                       }
+                       hitOOM = true;
+                       throw oom;
+               }
+               
+               // deprecated
+        [Obsolete]
+               private bool allowMinus1Position;
+               
+               /// <summary>Deprecated: emulates IndexWriter's buggy behavior when
+               /// first token(s) have positionIncrement==0 (ie, prior to
+               /// fixing LUCENE-1542) 
+               /// </summary>
+               public virtual void  SetAllowMinus1Position()
+               {
+                       allowMinus1Position = true;
+                       docWriter.SetAllowMinus1Position();
+               }
+               
+               // deprecated
+        [Obsolete]
+               internal virtual bool GetAllowMinus1Position()
+               {
+                       return allowMinus1Position;
+               }
+               
+               // Used only by assert for testing.  Current points:
+               //   startDoFlush
+               //   startCommitMerge
+               //   startStartCommit
+               //   midStartCommit
+               //   midStartCommit2
+               //   midStartCommitSuccess
+               //   finishStartCommit
+               //   startCommitMergeDeletes
+               //   startMergeInit
+               //   startApplyDeletes
+               //   DocumentsWriter.ThreadState.init start
+               public /*internal*/ virtual bool TestPoint(System.String name)
+               {
+                       return true;
+               }
+               
+               internal virtual bool NrtIsCurrent(SegmentInfos infos)
+               {
+                       lock (this)
+                       {
+                               if (!infos.Equals(segmentInfos))
+                               {
+                                       // if any structural changes (new segments), we are
+                                       // stale
+                                       return false;
+				}
+				else if (infos.GetGeneration() != segmentInfos.GetGeneration())
+				{
+					// if any commit took place since we were opened, we
+					// are stale
+					return false;
+				}
+				else
+				{
+					return !docWriter.AnyChanges();
+				}
+                       }
+               }
+               
+               internal virtual bool IsClosed()
+               {
+                       lock (this)
+                       {
+                               return closed;
+                       }
+               }
+               static IndexWriter()
+               {
+                       DEFAULT_MERGE_FACTOR = LogMergePolicy.DEFAULT_MERGE_FACTOR;
+                       DEFAULT_MAX_MERGE_DOCS = LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS;
+                       MAX_TERM_LENGTH = DocumentsWriter.MAX_TERM_LENGTH;
+                       {
+                               if (Constants.WINDOWS)
+                                       DEFAULT_MAX_SYNC_PAUSE_SECONDS = 10.0;
+                               else
+                                       DEFAULT_MAX_SYNC_PAUSE_SECONDS = 0.0;
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IntBlockPool.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/IntBlockPool.cs
new file mode 100644 (file)
index 0000000..868225f
--- /dev/null
@@ -0,0 +1,79 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class IntBlockPool
+       {
+               private void  InitBlock()
+               {
+                       intUpto = DocumentsWriter.INT_BLOCK_SIZE;
+               }
+               
+               public int[][] buffers = new int[10][];
+               
+		internal int bufferUpto = - 1; // Which buffer we are up to
+               public int intUpto; // Where we are in head buffer
+               
+               public int[] buffer; // Current head buffer
+               public int intOffset = - DocumentsWriter.INT_BLOCK_SIZE; // Current head offset
+               
+               private DocumentsWriter docWriter;
+               internal bool trackAllocations;
+               
+               public IntBlockPool(DocumentsWriter docWriter, bool trackAllocations)
+               {
+                       InitBlock();
+                       this.docWriter = docWriter;
+                       this.trackAllocations = trackAllocations;
+               }
+               
+               public void  Reset()
+               {
+                       if (bufferUpto != - 1)
+                       {
+				// Recycle all but the first buffer
+				if (bufferUpto > 0)
+					docWriter.RecycleIntBlocks(buffers, 1, 1 + bufferUpto);
+                               
+                               // Reuse first buffer
+                               bufferUpto = 0;
+                               intUpto = 0;
+                               intOffset = 0;
+                               buffer = buffers[0];
+                       }
+               }
+               
+               public void  NextBuffer()
+               {
+                       if (1 + bufferUpto == buffers.Length)
+                       {
+                               int[][] newBuffers = new int[(int) (buffers.Length * 1.5)][];
+                               Array.Copy(buffers, 0, newBuffers, 0, buffers.Length);
+                               buffers = newBuffers;
+                       }
+                       buffer = buffers[1 + bufferUpto] = docWriter.GetIntBlock(trackAllocations);
+                       bufferUpto++;
+                       
+                       intUpto = 0;
+                       intOffset += DocumentsWriter.INT_BLOCK_SIZE;
+               }
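+		// Worked example (editorial note): assuming DocumentsWriter.INT_BLOCK_SIZE
+		// is 8192, the first NextBuffer() call leaves intOffset at 0, the second
+		// at 8192, and so on; a value's global position is always intOffset + intUpto.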
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocConsumer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocConsumer.cs
new file mode 100644 (file)
index 0000000..738e27f
--- /dev/null
@@ -0,0 +1,50 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       abstract class InvertedDocConsumer
+       {
+               
+               /// <summary>Add a new thread </summary>
+               internal abstract InvertedDocConsumerPerThread AddThread(DocInverterPerThread docInverterPerThread);
+               
+               /// <summary>Abort (called after hitting AbortException) </summary>
+               public abstract void  Abort();
+               
+               /// <summary>Flush a new segment </summary>
+               internal abstract void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state);
+               
+               /// <summary>Close doc stores </summary>
+               internal abstract void  CloseDocStore(SegmentWriteState state);
+               
+               /// <summary>Attempt to free RAM, returning true if any RAM was
+               /// freed 
+               /// </summary>
+               public abstract bool FreeRAM();
+               
+               internal FieldInfos fieldInfos;
+               
+               internal virtual void  SetFieldInfos(FieldInfos fieldInfos)
+               {
+                       this.fieldInfos = fieldInfos;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocConsumerPerField.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocConsumerPerField.cs
new file mode 100644 (file)
index 0000000..a513064
--- /dev/null
@@ -0,0 +1,47 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Fieldable = Mono.Lucene.Net.Documents.Fieldable;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       abstract class InvertedDocConsumerPerField
+       {
+               
+               // Called once per field, and is given all Fieldable
+               // occurrences for this field in the document.  Return
+               // true if you wish to see inverted tokens for these
+               // fields:
+               internal abstract bool Start(Fieldable[] fields, int count);
+               
+               // Called before a field instance is being processed
+               internal abstract void  Start(Fieldable field);
+               
+               // Called once per inverted token
+               internal abstract void  Add();
+               
+               // Called once per field per document, after all Fieldable
+               // occurrences are inverted
+               internal abstract void  Finish();
+               
+               // Called on hitting an aborting exception
+               public abstract void  Abort();
+       }
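+	// Editorial summary of the contract above, for one document: Start(fields,
+	// count) is consulted once per field name; if it returns true, the consumer
+	// then sees Start(field) followed by one Add() per inverted token for each
+	// Fieldable occurrence, and finally a single Finish() for the field.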
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocConsumerPerThread.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocConsumerPerThread.cs
new file mode 100644 (file)
index 0000000..7c7ac05
--- /dev/null
@@ -0,0 +1,30 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       abstract class InvertedDocConsumerPerThread
+       {
+               public abstract void  StartDocument();
+               internal abstract InvertedDocConsumerPerField AddField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo);
+               public abstract DocumentsWriter.DocWriter FinishDocument();
+               public abstract void  Abort();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocEndConsumer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocEndConsumer.cs
new file mode 100644 (file)
index 0000000..0fb2641
--- /dev/null
@@ -0,0 +1,31 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       abstract class InvertedDocEndConsumer
+       {
+               public abstract InvertedDocEndConsumerPerThread AddThread(DocInverterPerThread docInverterPerThread);
+               public abstract void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state);
+               internal abstract void  CloseDocStore(SegmentWriteState state);
+               public abstract void  Abort();
+               internal abstract void  SetFieldInfos(FieldInfos fieldInfos);
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocEndConsumerPerField.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocEndConsumerPerField.cs
new file mode 100644 (file)
index 0000000..fdc3d04
--- /dev/null
@@ -0,0 +1,28 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       abstract class InvertedDocEndConsumerPerField
+       {
+               internal abstract void  Finish();
+               internal abstract void  Abort();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocEndConsumerPerThread.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/InvertedDocEndConsumerPerThread.cs
new file mode 100644 (file)
index 0000000..698e5e3
--- /dev/null
@@ -0,0 +1,30 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       abstract class InvertedDocEndConsumerPerThread
+       {
+               internal abstract void  StartDocument();
+               internal abstract InvertedDocEndConsumerPerField AddField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo);
+               internal abstract void  FinishDocument();
+               internal abstract void  Abort();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/KeepOnlyLastCommitDeletionPolicy.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/KeepOnlyLastCommitDeletionPolicy.cs
new file mode 100644 (file)
index 0000000..7cc8ff0
--- /dev/null
@@ -0,0 +1,51 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> This {@link IndexDeletionPolicy} implementation that
+       /// keeps only the most recent commit and immediately removes
+       /// all prior commits after a new commit is done.  This is
+       /// the default deletion policy.
+       /// </summary>
+       
+       public sealed class KeepOnlyLastCommitDeletionPolicy : IndexDeletionPolicy
+       {
+               
+               /// <summary> Deletes all commits except the most recent one.</summary>
+               public void  OnInit(System.Collections.IList commits)
+               {
+                       // Note that commits.size() should normally be 1:
+                       OnCommit(commits);
+               }
+               
+               /// <summary> Deletes all commits except the most recent one.</summary>
+               public void  OnCommit(System.Collections.IList commits)
+               {
+                       // Note that commits.size() should normally be 2 (if not
+                       // called by onInit above):
+                       int size = commits.Count;
+                       for (int i = 0; i < size - 1; i++)
+                       {
+                               ((IndexCommit) commits[i]).Delete();
+                       }
+               }
+       }
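+	// Hedged usage sketch (editorial; the four-argument IndexWriter overload
+	// is assumed from the public API).  This policy is already the default,
+	// so passing it explicitly is purely illustrative:
+	//
+	//   IndexWriter w = new IndexWriter(dir, analyzer,
+	//       new KeepOnlyLastCommitDeletionPolicy(),
+	//       IndexWriter.MaxFieldLength.LIMITED);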
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/LogByteSizeMergePolicy.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/LogByteSizeMergePolicy.cs
new file mode 100644 (file)
index 0000000..0ce1014
--- /dev/null
@@ -0,0 +1,108 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>This is a {@link LogMergePolicy} that measures size of a
+       /// segment as the total byte size of the segment's files. 
+       /// </summary>
+       public class LogByteSizeMergePolicy:LogMergePolicy
+       {
+               
+               /// <seealso cref="setMinMergeMB">
+               /// </seealso>
+               public const double DEFAULT_MIN_MERGE_MB = 1.6;
+               
+		/// <summary>Default maximum segment size.  A segment of this size or larger will never be merged.</summary>
+               /// <seealso cref="setMaxMergeMB">
+               /// </seealso>
+               public static readonly long DEFAULT_MAX_MERGE_MB = System.Int64.MaxValue;
+               
+               public LogByteSizeMergePolicy(IndexWriter writer):base(writer)
+               {
+                       minMergeSize = (long) (DEFAULT_MIN_MERGE_MB * 1024 * 1024);
+            //mgarski - the line below causes an overflow in .NET, resulting in a negative number...
+                       //maxMergeSize = (long) (DEFAULT_MAX_MERGE_MB * 1024 * 1024);
+            maxMergeSize = DEFAULT_MAX_MERGE_MB;
+               }
+               protected internal override long Size(SegmentInfo info)
+               {
+                       return SizeBytes(info);
+               }
+               
+               /// <summary><p/>Determines the largest segment (measured by total
+               /// byte size of the segment's files, in MB) that may be
+               /// merged with other segments.  Small values (e.g., less
+               /// than 50 MB) are best for interactive indexing, as this
+               /// limits the length of pauses while indexing to a few
+               /// seconds.  Larger values are best for batched indexing
+               /// and speedier searches.<p/>
+               /// 
+               /// <p/>Note that {@link #setMaxMergeDocs} is also
+               /// used to check whether a segment is too large for
+               /// merging (it's either or).<p/>
+               /// </summary>
+               public virtual void  SetMaxMergeMB(double mb)
+               {
+            //mgarski: java gracefully overflows to Int64.MaxValue, .NET to MinValue...
+                       maxMergeSize = (long) (mb * 1024 * 1024);
+            if (maxMergeSize < 0)
+            {
+                maxMergeSize = DEFAULT_MAX_MERGE_MB;
+            }
+               }
+               
+		/// <summary>Returns the largest segment (measured by total byte
+               /// size of the segment's files, in MB) that may be merged
+               /// with other segments.
+               /// </summary>
+               /// <seealso cref="setMaxMergeMB">
+               /// </seealso>
+               public virtual double GetMaxMergeMB()
+               {
+                       return ((double) maxMergeSize) / 1024 / 1024;
+               }
+               
+               /// <summary>Sets the minimum size for the lowest level segments.
+               /// Any segments below this size are considered to be on
+               /// the same level (even if they vary drastically in size)
+               /// and will be merged whenever there are mergeFactor of
+               /// them.  This effectively truncates the "long tail" of
+               /// small segments that would otherwise be created into a
+               /// single level.  If you set this too large, it could
+               /// greatly increase the merging cost during indexing (if
+               /// you flush many small segments). 
+               /// </summary>
+               public virtual void  SetMinMergeMB(double mb)
+               {
+                       minMergeSize = (long) (mb * 1024 * 1024);
+               }
+               
+               /// <summary>Get the minimum size for a segment to remain
+               /// un-merged.
+               /// </summary>
+               /// <seealso cref="setMinMergeMB">
+               /// </seealso>
+               public virtual double GetMinMergeMB()
+               {
+                       return ((double) minMergeSize) / 1024 / 1024;
+               }
+       }
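+	// Configuration sketch (editorial; the threshold values are illustrative
+	// only, per the guidance in the summaries above, and IndexWriter.SetMergePolicy
+	// is assumed from the public API):
+	//
+	//   LogByteSizeMergePolicy policy = new LogByteSizeMergePolicy(writer);
+	//   policy.SetMinMergeMB(1.6);  // segments below this share the lowest level
+	//   policy.SetMaxMergeMB(48.0); // keep interactive-indexing pauses short
+	//   writer.SetMergePolicy(policy);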
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/LogDocMergePolicy.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/LogDocMergePolicy.cs
new file mode 100644 (file)
index 0000000..43dd6eb
--- /dev/null
@@ -0,0 +1,73 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>This is a {@link LogMergePolicy} that measures size of a
+       /// segment as the number of documents (not taking deletions
+       /// into account). 
+       /// </summary>
+       
+       public class LogDocMergePolicy:LogMergePolicy
+       {
+               
+               /// <seealso cref="setMinMergeDocs">
+               /// </seealso>
+               public const int DEFAULT_MIN_MERGE_DOCS = 1000;
+               
+               public LogDocMergePolicy(IndexWriter writer):base(writer)
+               {
+                       minMergeSize = DEFAULT_MIN_MERGE_DOCS;
+                       
+                       // maxMergeSize is never used by LogDocMergePolicy; set
+                       // it to Long.MAX_VALUE to disable it
+                       maxMergeSize = System.Int64.MaxValue;
+               }
+               protected internal override long Size(SegmentInfo info)
+               {
+                       return SizeDocs(info);
+               }
+               
+               /// <summary>Sets the minimum size for the lowest level segments.
+               /// Any segments below this size are considered to be on
+               /// the same level (even if they vary drastically in size)
+               /// and will be merged whenever there are mergeFactor of
+               /// them.  This effectively truncates the "long tail" of
+               /// small segments that would otherwise be created into a
+               /// single level.  If you set this too large, it could
+               /// greatly increase the merging cost during indexing (if
+               /// you flush many small segments). 
+               /// </summary>
+               public virtual void  SetMinMergeDocs(int minMergeDocs)
+               {
+                       minMergeSize = minMergeDocs;
+               }
+               
+               /// <summary>Get the minimum size for a segment to remain
+               /// un-merged.
+               /// </summary>
+               /// <seealso cref="setMinMergeDocs">
+               /// </seealso>
+               public virtual int GetMinMergeDocs()
+               {
+                       return (int) minMergeSize;
+               }
+       }
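+	// Editorial sketch, the document-count analogue of LogByteSizeMergePolicy
+	// (IndexWriter.SetMergePolicy is assumed from the public API):
+	//
+	//   LogDocMergePolicy policy = new LogDocMergePolicy(writer);
+	//   policy.SetMinMergeDocs(1000); // i.e. DEFAULT_MIN_MERGE_DOCS
+	//   writer.SetMergePolicy(policy);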
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/LogMergePolicy.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/LogMergePolicy.cs
new file mode 100644 (file)
index 0000000..4c02909
--- /dev/null
@@ -0,0 +1,605 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary><p/>This class implements a {@link MergePolicy} that tries
+       /// to merge segments into levels of exponentially
+       /// increasing size, where each level has fewer segments than
+       /// the value of the merge factor. Whenever extra segments
+       /// (beyond the merge factor upper bound) are encountered,
+       /// all segments within the level are merged. You can get or
+       /// set the merge factor using {@link #GetMergeFactor()} and
+       /// {@link #SetMergeFactor(int)} respectively.<p/>
+       /// 
+       /// <p/>This class is abstract and requires a subclass to
+       /// define the {@link #size} method which specifies how a
+       /// segment's size is determined.  {@link LogDocMergePolicy}
+       /// is one subclass that measures size by document count in
+       /// the segment.  {@link LogByteSizeMergePolicy} is another
+       /// subclass that measures size as the total byte size of the
+       /// file(s) for the segment.<p/>
+       /// </summary>
+       
+       public abstract class LogMergePolicy:MergePolicy
+       {
+               
+               /// <summary>Defines the allowed range of log(size) for each
+               /// level.  A level is computed by taking the max segment
+               /// log size, minus LEVEL_LOG_SPAN, and finding all
+               /// segments falling within that range. 
+               /// </summary>
+               public const double LEVEL_LOG_SPAN = 0.75;
+               
+               /// <summary>Default merge factor, which is how many segments are
+               /// merged at a time 
+               /// </summary>
+               public const int DEFAULT_MERGE_FACTOR = 10;
+               
+		/// <summary>Default maximum segment size.  A segment of this size or larger will never be merged.</summary>
+               /// <seealso cref="setMaxMergeDocs">
+               /// </seealso>
+               public static readonly int DEFAULT_MAX_MERGE_DOCS = System.Int32.MaxValue;
+
+        /// <summary> Default noCFSRatio.  If a merge's size is >= 10% of
+        ///  the index, then we disable compound file for it.
+        ///  </summary>
+        /// <seealso cref="SetNoCFSRatio">
+        /// </seealso>
+        public static double DEFAULT_NO_CFS_RATIO = 0.1;
+               
+               private int mergeFactor = DEFAULT_MERGE_FACTOR;
+               
+               internal long minMergeSize;
+               internal long maxMergeSize;
+               internal int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
+
+        protected double noCFSRatio = DEFAULT_NO_CFS_RATIO;
+               
+               /* TODO 3.0: change this default to true */
+               protected internal bool calibrateSizeByDeletes = false;
+               
+               private bool useCompoundFile = true;
+               private bool useCompoundDocStore = true;
+               
+               public LogMergePolicy(IndexWriter writer):base(writer)
+               {
+               }
+               
+               protected internal virtual bool Verbose()
+               {
+                       return writer != null && writer.Verbose();
+               }
+
+
+        /// <seealso cref="SetNoCFSRatio">
+        /// </seealso>
+        public double GetNoCFSRatio()
+        {
+            return noCFSRatio;
+        }
+
+        /// <summary>If a merged segment will be more than this percentage
+        /// of the total size of the index, leave the segment as a
+        /// non-compound file even if compound file format is enabled.
+        /// Set to 1.0 to always use CFS regardless of merge
+        /// size.
+        /// </summary>
+        public void SetNoCFSRatio(double noCFSRatio)
+        {
+            if (noCFSRatio < 0.0 || noCFSRatio > 1.0)
+            {
+                throw new ArgumentException("noCFSRatio must be 0.0 to 1.0 inclusive; got " + noCFSRatio);
+            }
+            this.noCFSRatio = noCFSRatio;
+        }
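+        // Worked example (editorial): with the default ratio of 0.1, merging
+        // 150 MB of segments inside a 1 GB index gives 150/1024 ~= 0.146 >= 0.1,
+        // so the merged segment stays non-compound even when compound files
+        // are enabled.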
+               
+               private void  Message(System.String message)
+               {
+                       if (Verbose())
+                               writer.Message("LMP: " + message);
+               }
+               
+               /// <summary><p/>Returns the number of segments that are merged at
+               /// once and also controls the total number of segments
+               /// allowed to accumulate in the index.<p/> 
+               /// </summary>
+               public virtual int GetMergeFactor()
+               {
+                       return mergeFactor;
+               }
+               
+               /// <summary>Determines how often segment indices are merged by
+               /// addDocument().  With smaller values, less RAM is used
+               /// while indexing, and searches on unoptimized indices are
+               /// faster, but indexing speed is slower.  With larger
+               /// values, more RAM is used during indexing, and while
+               /// searches on unoptimized indices are slower, indexing is
+        /// faster.  Thus larger values (&gt; 10) are best for batch
+        /// index creation, and smaller values (&lt; 10) for indices
+               /// that are interactively maintained. 
+               /// </summary>
+               public virtual void  SetMergeFactor(int mergeFactor)
+               {
+                       if (mergeFactor < 2)
+                               throw new System.ArgumentException("mergeFactor cannot be less than 2");
+                       this.mergeFactor = mergeFactor;
+               }
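+		// Worked example (editorial): with mergeFactor = 10, ten ~1,000-doc
+		// segments merge into one ~10,000-doc segment, ten of those into one
+		// ~100,000-doc segment, and so on, so the segment count grows roughly
+		// as mergeFactor * log10(numDocs).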
+               
+               // Javadoc inherited
+               public override bool UseCompoundFile(SegmentInfos infos, SegmentInfo info)
+               {
+                       return useCompoundFile;
+               }
+               
+               /// <summary>Sets whether compound file format should be used for
+               /// newly flushed and newly merged segments. 
+               /// </summary>
+               public virtual void  SetUseCompoundFile(bool useCompoundFile)
+               {
+                       this.useCompoundFile = useCompoundFile;
+               }
+               
+		/// <summary>Returns true if newly flushed and newly merged segments should use the compound file format.</summary>
+        /// <seealso cref="SetUseCompoundFile">
+               /// </seealso>
+               public virtual bool GetUseCompoundFile()
+               {
+                       return useCompoundFile;
+               }
+               
+               // Javadoc inherited
+               public override bool UseCompoundDocStore(SegmentInfos infos)
+               {
+                       return useCompoundDocStore;
+               }
+               
+               /// <summary>Sets whether compound file format should be used for
+               /// newly flushed and newly merged doc store
+               /// segment files (term vectors and stored fields). 
+               /// </summary>
+               public virtual void  SetUseCompoundDocStore(bool useCompoundDocStore)
+               {
+                       this.useCompoundDocStore = useCompoundDocStore;
+               }
+               
+		/// <summary>Returns true if newly flushed and newly merged doc
+		/// store segment files (term vectors and stored fields)
+		/// should use the compound file format.
+		/// </summary>
+        /// <seealso cref="SetUseCompoundDocStore ">
+               /// </seealso>
+               public virtual bool GetUseCompoundDocStore()
+               {
+                       return useCompoundDocStore;
+               }
+               
+               /// <summary>Sets whether the segment size should be calibrated by
+               /// the number of deletes when choosing segments for merge. 
+               /// </summary>
+               public virtual void  SetCalibrateSizeByDeletes(bool calibrateSizeByDeletes)
+               {
+                       this.calibrateSizeByDeletes = calibrateSizeByDeletes;
+               }
+               
+               /// <summary>Returns true if the segment size should be calibrated 
+               /// by the number of deletes when choosing segments for merge. 
+               /// </summary>
+               public virtual bool GetCalibrateSizeByDeletes()
+               {
+                       return calibrateSizeByDeletes;
+               }
+               
+               public override void  Close()
+               {
+               }
+               
+               abstract protected internal long Size(SegmentInfo info);
+               
+               protected internal virtual long SizeDocs(SegmentInfo info)
+               {
+                       if (calibrateSizeByDeletes)
+                       {
+                               int delCount = writer.NumDeletedDocs(info);
+                               return (info.docCount - (long) delCount);
+                       }
+                       else
+                       {
+                               return info.docCount;
+                       }
+               }
+               
+               protected internal virtual long SizeBytes(SegmentInfo info)
+               {
+                       long byteSize = info.SizeInBytes();
+                       if (calibrateSizeByDeletes)
+                       {
+                               int delCount = writer.NumDeletedDocs(info);
+                               float delRatio = (info.docCount <= 0?0.0f:((float) delCount / (float) info.docCount));
+                               return (info.docCount <= 0?byteSize:(long) (byteSize * (1.0f - delRatio)));
+                       }
+                       else
+                       {
+                               return byteSize;
+                       }
+               }
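+		// Worked example (editorial): with calibrateSizeByDeletes enabled, a
+		// 100 MB segment in which 25 of its 100 docs are deleted has
+		// delRatio = 0.25 and reports 100 MB * (1 - 0.25) = 75 MB.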
+               
+               private bool IsOptimized(SegmentInfos infos, int maxNumSegments, System.Collections.Hashtable segmentsToOptimize)
+               {
+                       int numSegments = infos.Count;
+                       int numToOptimize = 0;
+                       SegmentInfo optimizeInfo = null;
+                       for (int i = 0; i < numSegments && numToOptimize <= maxNumSegments; i++)
+                       {
+                               SegmentInfo info = infos.Info(i);
+                               if (segmentsToOptimize.Contains(info))
+                               {
+                                       numToOptimize++;
+                                       optimizeInfo = info;
+                               }
+                       }
+                       
+                       return numToOptimize <= maxNumSegments && (numToOptimize != 1 || IsOptimized(optimizeInfo));
+               }
+               
+               /// <summary>Returns true if this single info is optimized (has no
+               /// pending norms or deletes, is in the same dir as the
+		/// writer, and matches the current compound file setting).
+		/// </summary>
+               private bool IsOptimized(SegmentInfo info)
+               {
+                       bool hasDeletions = writer.NumDeletedDocs(info) > 0;
+                       return !hasDeletions && !info.HasSeparateNorms() && info.dir == writer.GetDirectory() &&
+                (info.GetUseCompoundFile() == useCompoundFile || noCFSRatio < 1.0);
+               }
+               
+               /// <summary>Returns the merges necessary to optimize the index.
+               /// This merge policy defines "optimized" to mean only one
+               /// segment in the index, where that segment has no
+               /// deletions pending nor separate norms, and it is in
+               /// compound file format if the current useCompoundFile
+               /// setting is true.  This method returns multiple merges
+               /// (mergeFactor at a time) so the {@link MergeScheduler}
+               /// in use may make use of concurrency. 
+               /// </summary>
+               public override MergeSpecification FindMergesForOptimize(SegmentInfos infos, int maxNumSegments, System.Collections.Hashtable segmentsToOptimize)
+               {
+                       MergeSpecification spec;
+                       
+                       System.Diagnostics.Debug.Assert(maxNumSegments > 0);
+                       
+                       if (!IsOptimized(infos, maxNumSegments, segmentsToOptimize))
+                       {
+                               
+                               // Find the newest (rightmost) segment that needs to
+                               // be optimized (other segments may have been flushed
+                               // since optimize started):
+                               int last = infos.Count;
+                               while (last > 0)
+                               {
+                                       SegmentInfo info = infos.Info(--last);
+                                       if (segmentsToOptimize.Contains(info))
+                                       {
+                                               last++;
+                                               break;
+                                       }
+                               }
+                               
+                               if (last > 0)
+                               {
+                                       
+                                       spec = new MergeSpecification();
+                                       
+                                       // First, enroll all "full" merges (size
+                                       // mergeFactor) to potentially be run concurrently:
+                                       while (last - maxNumSegments + 1 >= mergeFactor)
+                                       {
+                        spec.Add(MakeOneMerge(infos, infos.Range(last - mergeFactor, last)));
+                                               last -= mergeFactor;
+                                       }
+                                       
+                                       // Only if there are no full merges pending do we
+                                       // add a final partial (< mergeFactor segments) merge:
+                                       if (0 == spec.merges.Count)
+                                       {
+                                               if (maxNumSegments == 1)
+                                               {
+                                                       
+                                                       // Since we must optimize down to 1 segment, the
+                                                       // choice is simple:
+                                                       if (last > 1 || !IsOptimized(infos.Info(0)))
+                                spec.Add(MakeOneMerge(infos, infos.Range(0, last)));
+                                               }
+                                               else if (last > maxNumSegments)
+                                               {
+                                                       
+                                                       // Take care to pick a partial merge that is
+                                                       // least cost, but does not make the index too
+                                                       // lopsided.  If we always just picked the
+                                                       // partial tail then we could produce a highly
+                                                       // lopsided index over time:
+                                                       
+                                                       // We must merge this many segments to leave
+                                                       // maxNumSegments in the index (from when
+                                                       // optimize was first kicked off):
+                                                       int finalMergeSize = last - maxNumSegments + 1;
+                                                       
+                                                       // Consider all possible starting points:
+                                                       long bestSize = 0;
+                                                       int bestStart = 0;
+                                                       
+                                                       for (int i = 0; i < last - finalMergeSize + 1; i++)
+                                                       {
+                                                               long sumSize = 0;
+                                                               for (int j = 0; j < finalMergeSize; j++)
+                                                                       sumSize += Size(infos.Info(j + i));
+                                                               if (i == 0 || (sumSize < 2 * Size(infos.Info(i - 1)) && sumSize < bestSize))
+                                                               {
+                                                                       bestStart = i;
+                                                                       bestSize = sumSize;
+                                                               }
+                                                       }
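+                                                       // A window is accepted above only while its total size stays
+                                                       // under twice the segment just to its left, so the merged
+                                                       // segment cannot dwarf its neighbour; among those candidates
+                                                       // the smallest (cheapest) window wins.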
+
+                            spec.Add(MakeOneMerge(infos, infos.Range(bestStart, bestStart + finalMergeSize)));
+                                               }
+                                       }
+                               }
+                               else
+                                       spec = null;
+                       }
+                       else
+                               spec = null;
+                       
+                       return spec;
+               }
+               
+               /// <summary> Finds merges necessary to expunge all deletes from the
+               /// index.  We simply merge adjacent segments that have
+               /// deletes, up to mergeFactor at a time.
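+               /// For example, with a mergeFactor of 10, a run of 25 segments
+               /// carrying deletes yields two 10-segment merges and one final
+               /// 5-segment merge.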
+               /// </summary>
+               public override MergeSpecification FindMergesToExpungeDeletes(SegmentInfos segmentInfos)
+               {
+                       int numSegments = segmentInfos.Count;
+                       
+                       if (Verbose())
+                               Message("findMergesToExpungeDeletes: " + numSegments + " segments");
+                       
+                       MergeSpecification spec = new MergeSpecification();
+                       int firstSegmentWithDeletions = -1;
+                       for (int i = 0; i < numSegments; i++)
+                       {
+                               SegmentInfo info = segmentInfos.Info(i);
+                               int delCount = writer.NumDeletedDocs(info);
+                               if (delCount > 0)
+                               {
+                                       if (Verbose())
+                                               Message("  segment " + info.name + " has deletions");
+                                       if (firstSegmentWithDeletions == -1)
+                                               firstSegmentWithDeletions = i;
+                                       else if (i - firstSegmentWithDeletions == mergeFactor)
+                                       {
+                                               // We've seen mergeFactor segments in a row with
+                                               // deletions, so force a merge now:
+                                               if (Verbose())
+                                                       Message("  add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
+                        spec.Add(MakeOneMerge(segmentInfos, segmentInfos.Range(firstSegmentWithDeletions, i)));
+                                               firstSegmentWithDeletions = i;
+                                       }
+                               }
+                               else if (firstSegmentWithDeletions != -1)
+                               {
+                                       // End of a sequence of segments with deletions, so,
+                                       // merge those past segments even if it's fewer than
+                                       // mergeFactor segments
+                                       if (Verbose())
+                                               Message("  add merge " + firstSegmentWithDeletions + " to " + (i - 1) + " inclusive");
+                    spec.Add(MakeOneMerge(segmentInfos, segmentInfos.Range(firstSegmentWithDeletions, i)));
+                                       firstSegmentWithDeletions = -1;
+                               }
+                       }
+                       
+                       if (firstSegmentWithDeletions != -1)
+                       {
+                               if (Verbose())
+                                       Message("  add merge " + firstSegmentWithDeletions + " to " + (numSegments - 1) + " inclusive");
+                spec.Add(MakeOneMerge(segmentInfos, segmentInfos.Range(firstSegmentWithDeletions, numSegments)));
+                       }
+                       
+                       return spec;
+               }
+               
+               /// <summary>Checks if any merges are now necessary and returns a
+               /// {@link MergePolicy.MergeSpecification} if so.  A merge
+               /// is necessary when there are more than {@link
+               /// #setMergeFactor} segments at a given level.  When
+               /// multiple levels have too many segments, this method
+               /// will return multiple merges, allowing the {@link
+               /// MergeScheduler} to use concurrency. 
+               /// </summary>
+               public override MergeSpecification FindMerges(SegmentInfos infos)
+               {
+                       
+                       int numSegments = infos.Count;
+                       if (Verbose())
+                               Message("findMerges: " + numSegments + " segments");
+                       
+                       // Compute levels, which is just log (base mergeFactor)
+                       // of the size of each segment
+                       float[] levels = new float[numSegments];
+                       float norm = (float) System.Math.Log(mergeFactor);
+                       
+                       for (int i = 0; i < numSegments; i++)
+                       {
+                               SegmentInfo info = infos.Info(i);
+                               long size = Size(info);
+                               
+                               // Floor tiny segments
+                               if (size < 1)
+                                       size = 1;
+                               levels[i] = (float) System.Math.Log(size) / norm;
+                       }
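+                       // Illustrative: with mergeFactor 10, a segment whose Size() is
+                       // 1,000,000 lands on level log(1e6)/log(10) = 6.0.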
+                       
+                       float levelFloor;
+                       if (minMergeSize <= 0)
+                               levelFloor = (float) 0.0;
+                       else
+                       {
+                               levelFloor = (float) (System.Math.Log(minMergeSize) / norm);
+                       }
+                       
+                       // Now, we quantize the log values into levels.  The
+                       // first level is any segment whose log size is within
+                       // LEVEL_LOG_SPAN of the max size, or that has such a
+                       // segment "to the right".  Then, we find the max of all
+                       // other segments and use that to define the next level
+                       // segment, etc.
+                       
+                       MergeSpecification spec = null;
+                       
+                       int start = 0;
+                       while (start < numSegments)
+                       {
+                               
+                               // Find max level of all segments not already
+                               // quantized.
+                               float maxLevel = levels[start];
+                               for (int i = 1 + start; i < numSegments; i++)
+                               {
+                                       float level = levels[i];
+                                       if (level > maxLevel)
+                                               maxLevel = level;
+                               }
+                               
+                               // Now search backwards for the rightmost segment that
+                               // falls into this level:
+                               float levelBottom;
+                               if (maxLevel < levelFloor)
+                               // All remaining segments fall into the min level
+                                       levelBottom = -1.0F;
+                               else
+                               {
+                                       levelBottom = (float) (maxLevel - LEVEL_LOG_SPAN);
+                                       
+                                       // Force a boundary at the level floor
+                                       if (levelBottom < levelFloor && maxLevel >= levelFloor)
+                                               levelBottom = levelFloor;
+                               }
+                               
+                               int upto = numSegments - 1;
+                               while (upto >= start)
+                               {
+                                       if (levels[upto] >= levelBottom)
+                                       {
+                                               break;
+                                       }
+                                       upto--;
+                               }
+                               if (Verbose())
+                                       Message("  level " + levelBottom + " to " + maxLevel + ": " + (1 + upto - start) + " segments");
+                               
+                               // Finally, record all merges that are viable at this level:
+                               int end = start + mergeFactor;
+                               while (end <= 1 + upto)
+                               {
+                                       bool anyTooLarge = false;
+                                       for (int i = start; i < end; i++)
+                                       {
+                                               SegmentInfo info = infos.Info(i);
+                                               anyTooLarge |= (Size(info) >= maxMergeSize || SizeDocs(info) >= maxMergeDocs);
+                                       }
+                                       
+                                       if (!anyTooLarge)
+                                       {
+                                               if (spec == null)
+                                                       spec = new MergeSpecification();
+                                               if (Verbose())
+                                                       Message("    " + start + " to " + end + ": add this merge");
+                        spec.Add(MakeOneMerge(infos, infos.Range(start, end)));
+                                       }
+                                       else if (Verbose())
+                                               Message("    " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping");
+                                       
+                                       start = end;
+                                       end = start + mergeFactor;
+                               }
+                               
+                               start = 1 + upto;
+                       }
+                       
+                       return spec;
+               }
+        
+        protected OneMerge MakeOneMerge(SegmentInfos infos, SegmentInfos infosToMerge)
+        {
+            bool doCFS;
+            if (!useCompoundFile)
+            {
+                doCFS = false;
+            }
+            else if (noCFSRatio == 1.0)
+            {
+                doCFS = true;
+            }
+            else
+            {
+                long totSize = 0;
+                for (int i = 0; i < infos.Count; i++)
+                {
+                    totSize += Size(infos.Info(i));
+                }
+                long mergeSize = 0;
+                for (int i = 0; i < infosToMerge.Count; i++)
+                {
+                    mergeSize += Size(infosToMerge.Info(i));
+                }
+
+                doCFS = mergeSize <= noCFSRatio * totSize;
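+                // e.g. with noCFSRatio = 0.1, a merge covering more than 10% of
+                // the total index size keeps the non-compound file format.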
+            }
+
+            return new OneMerge(infosToMerge, doCFS);
+        }
+               
+               /// <summary><p/>Determines the largest segment (measured by
+               /// document count) that may be merged with other segments.
+               /// Small values (e.g., less than 10,000) are best for
+               /// interactive indexing, as this limits the length of
+               /// pauses while indexing to a few seconds.  Larger values
+               /// are best for batched indexing and speedier
+               /// searches.<p/>
+               /// 
+               /// <p/>The default value is {@link Integer#MAX_VALUE}.<p/>
+               /// 
+               /// <p/>The default merge policy ({@link
+               /// LogByteSizeMergePolicy}) also allows you to set this
+               /// limit by net size (in MB) of the segment, using {@link
+               /// LogByteSizeMergePolicy#setMaxMergeMB}.<p/>
+               /// </summary>
+               public virtual void  SetMaxMergeDocs(int maxMergeDocs)
+               {
+                       this.maxMergeDocs = maxMergeDocs;
+               }
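+               // Hypothetical usage sketch (assuming the writer exposes its policy
+               // via IndexWriter.GetMergePolicy(), as in Lucene 2.9):
+               //   ((LogMergePolicy) writer.GetMergePolicy()).SetMaxMergeDocs(10000);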
+               
+               /// <summary>Returns the largest segment (measured by document
+               /// count) that may be merged with other segments.
+               /// </summary>
+               /// <seealso cref="setMaxMergeDocs">
+               /// </seealso>
+               public virtual int GetMaxMergeDocs()
+               {
+                       return maxMergeDocs;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MergeDocIDRemapper.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MergeDocIDRemapper.cs
new file mode 100644 (file)
index 0000000..7768dd1
--- /dev/null
@@ -0,0 +1,126 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>Remaps docIDs after a merge has completed, where the
+       /// merged segments had at least one deletion.  This is used
+       /// to renumber the buffered deletes in IndexWriter when a
+       /// merge of segments with deletions commits. 
+       /// </summary>
+       
+       sealed class MergeDocIDRemapper
+       {
+               internal int[] starts; // used for binary search of mapped docID
+               internal int[] newStarts; // starts, minus the deletes
+               internal int[][] docMaps; // maps docIDs in the merged set
+               internal int minDocID; // minimum docID that needs renumbering
+               internal int maxDocID; // 1+ the max docID that needs renumbering
+               internal int docShift; // total # deleted docs that were compacted by this merge
+               
+               public MergeDocIDRemapper(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergedDocCount)
+               {
+                       this.docMaps = docMaps;
+                       SegmentInfo firstSegment = merge.segments.Info(0);
+                       int i = 0;
+                       while (true)
+                       {
+                               SegmentInfo info = infos.Info(i);
+                               if (info.Equals(firstSegment))
+                                       break;
+                               minDocID += info.docCount;
+                               i++;
+                       }
+                       
+                       int numDocs = 0;
+                       for (int j = 0; j < docMaps.Length; i++, j++)
+                       {
+                               numDocs += infos.Info(i).docCount;
+                               System.Diagnostics.Debug.Assert(infos.Info(i).Equals(merge.segments.Info(j)));
+                       }
+                       maxDocID = minDocID + numDocs;
+                       
+                       starts = new int[docMaps.Length];
+                       newStarts = new int[docMaps.Length];
+                       
+                       starts[0] = minDocID;
+                       newStarts[0] = minDocID;
+                       for (i = 1; i < docMaps.Length; i++)
+                       {
+                               int lastDocCount = merge.segments.Info(i - 1).docCount;
+                               starts[i] = starts[i - 1] + lastDocCount;
+                               newStarts[i] = newStarts[i - 1] + lastDocCount - delCounts[i - 1];
+                       }
+                       docShift = numDocs - mergedDocCount;
+                       
+                       // There are rare cases when docShift is 0.  It happens
+                       // if you try to delete a docID that's out of bounds,
+                       // because the SegmentReader still allocates deletedDocs
+                       // and pretends it has deletions ... so we can't make
+                       // this assert here
+                       // assert docShift > 0;
+                       
+                       // Make sure it all adds up:
+                       System.Diagnostics.Debug.Assert(docShift == maxDocID - (newStarts[docMaps.Length - 1] + merge.segments.Info(docMaps.Length - 1).docCount - delCounts[docMaps.Length - 1]));
+               }
+               
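+               /// <summary>Maps a pre-merge docID to its post-merge equivalent:
+               /// IDs below the merged range are unchanged, IDs at or above
+               /// maxDocID are shifted down by the compacted delete count, and IDs
+               /// inside the range are resolved through the per-segment docMaps.
+               /// </summary>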
+               public int Remap(int oldDocID)
+               {
+                       if (oldDocID < minDocID)
+                       // Unaffected by merge
+                               return oldDocID;
+                       else if (oldDocID >= maxDocID)
+                       // This doc was "after" the merge, so simple shift
+                               return oldDocID - docShift;
+                       else
+                       {
+                               // Binary search to locate this document & find its new docID
+                               int lo = 0; // search starts array
+                               int hi = docMaps.Length - 1; // for first element less
+                               
+                               while (hi >= lo)
+                               {
+                                       int mid = SupportClass.Number.URShift((lo + hi), 1);
+                                       int midValue = starts[mid];
+                                       if (oldDocID < midValue)
+                                               hi = mid - 1;
+                                       else if (oldDocID > midValue)
+                                               lo = mid + 1;
+                                       else
+                                       {
+                                               // found a match
+                                               while (mid + 1 < docMaps.Length && starts[mid + 1] == midValue)
+                                               {
+                                                       mid++; // scan to last match
+                                               }
+                                               if (docMaps[mid] != null)
+                                                       return newStarts[mid] + docMaps[mid][oldDocID - starts[mid]];
+                                               else
+                                                       return newStarts[mid] + oldDocID - starts[mid];
+                                       }
+                               }
+                               if (docMaps[hi] != null)
+                                       return newStarts[hi] + docMaps[hi][oldDocID - starts[hi]];
+                               else
+                                       return newStarts[hi] + oldDocID - starts[hi];
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MergePolicy.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MergePolicy.cs
new file mode 100644 (file)
index 0000000..63bddb3
--- /dev/null
@@ -0,0 +1,309 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Directory = Mono.Lucene.Net.Store.Directory;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> <p/>Expert: a MergePolicy determines the sequence of
+       /// primitive merge operations to be used for overall merge
+       /// and optimize operations.<p/>
+       /// 
+       /// <p/>Whenever the segments in an index have been altered by
+       /// {@link IndexWriter}, either the addition of a newly
+       /// flushed segment, addition of many segments from
+       /// addIndexes* calls, or a previous merge that may now need
+       /// to cascade, {@link IndexWriter} invokes {@link
+       /// #findMerges} to give the MergePolicy a chance to pick
+       /// merges that are now required.  This method returns a
+       /// {@link MergeSpecification} instance describing the set of
+       /// merges that should be done, or null if no merges are
+       /// necessary.  When IndexWriter.optimize is called, it calls
+       /// {@link #findMergesForOptimize} and the MergePolicy should
+       /// then return the necessary merges.<p/>
+       /// 
+       /// <p/>Note that the policy can return more than one merge at
+       /// a time.  In this case, if the writer is using {@link
+       /// SerialMergeScheduler}, the merges will be run
+       /// sequentially but if it is using {@link
+       /// ConcurrentMergeScheduler} they will be run concurrently.<p/>
+       /// 
+       /// <p/>The default MergePolicy is {@link
+       /// LogByteSizeMergePolicy}.<p/>
+       /// 
+       /// <p/><b>NOTE:</b> This API is new and still experimental
+       /// (subject to change suddenly in the next release)<p/>
+       /// 
+       /// <p/><b>NOTE</b>: This class typically requires access to
+       /// package-private APIs (e.g. <code>SegmentInfos</code>) to do its job;
+       /// if you implement your own MergePolicy, you'll need to put
+       /// it in package Mono.Lucene.Net.Index in order to use
+       /// these APIs.
+       /// </summary>
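+       /// <example>A minimal, purely hypothetical policy that never schedules
+       /// any merges could look like:
+       /// <code>
+       /// class NoMergePolicy : MergePolicy
+       /// {
+       ///     public NoMergePolicy(IndexWriter writer) : base(writer) {}
+       ///     public override MergeSpecification FindMerges(SegmentInfos infos) { return null; }
+       ///     public override MergeSpecification FindMergesForOptimize(SegmentInfos infos, int maxSegmentCount, System.Collections.Hashtable segmentsToOptimize) { return null; }
+       ///     public override MergeSpecification FindMergesToExpungeDeletes(SegmentInfos infos) { return null; }
+       ///     public override void Close() {}
+       ///     public override bool UseCompoundFile(SegmentInfos segments, SegmentInfo newSegment) { return true; }
+       ///     public override bool UseCompoundDocStore(SegmentInfos segments) { return true; }
+       /// }
+       /// </code></example>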
+       
+       public abstract class MergePolicy
+       {
+               
+               /// <summary>OneMerge provides the information necessary to perform
+               /// an individual primitive merge operation, resulting in
+               /// a single new segment.  The merge spec includes the
+               /// subset of segments to be merged as well as whether the
+               /// new segment should use the compound file format. 
+               /// </summary>
+               
+               public class OneMerge
+               {
+                       
+                       internal SegmentInfo info; // used by IndexWriter
+                       internal bool mergeDocStores; // used by IndexWriter
+                       internal bool optimize; // used by IndexWriter
+                       internal bool registerDone; // used by IndexWriter
+                       internal long mergeGen; // used by IndexWriter
+                       internal bool isExternal; // used by IndexWriter
+                       internal int maxNumSegmentsOptimize; // used by IndexWriter
+                       internal SegmentReader[] readers; // used by IndexWriter
+                       internal SegmentReader[] readersClone; // used by IndexWriter
+                       internal SegmentInfos segments;
+                       internal bool useCompoundFile;
+                       internal bool aborted;
+                       internal System.Exception error;
+
+            internal volatile bool mergeDone;     // used by IndexWriter
+                       
+                       public OneMerge(SegmentInfos segments, bool useCompoundFile)
+                       {
+                               if (0 == segments.Count)
+                                       throw new System.SystemException("segments must include at least one segment");
+                               this.segments = segments;
+                               this.useCompoundFile = useCompoundFile;
+                       }
+                       
+                       /// <summary>Record that an exception occurred while executing
+                       /// this merge 
+                       /// </summary>
+                       internal virtual void  SetException(System.Exception error)
+                       {
+                               lock (this)
+                               {
+                                       this.error = error;
+                               }
+                       }
+                       
+                       /// <summary>Retrieve previous exception set by {@link
+                       /// #setException}. 
+                       /// </summary>
+                       internal virtual System.Exception GetException()
+                       {
+                               lock (this)
+                               {
+                                       return error;
+                               }
+                       }
+                       
+                       /// <summary>Mark this merge as aborted.  If this is called
+                       /// before the merge is committed then the merge will
+                       /// not be committed. 
+                       /// </summary>
+                       internal virtual void  Abort()
+                       {
+                               lock (this)
+                               {
+                                       aborted = true;
+                               }
+                       }
+                       
+                       /// <summary>Returns true if this merge was aborted. </summary>
+                       internal virtual bool IsAborted()
+                       {
+                               lock (this)
+                               {
+                                       return aborted;
+                               }
+                       }
+                       
+                       internal virtual void  CheckAborted(Directory dir)
+                       {
+                               lock (this)
+                               {
+                                       if (aborted)
+                                               throw new MergeAbortedException("merge is aborted: " + SegString(dir));
+                               }
+                       }
+                       
+                       internal virtual System.String SegString(Directory dir)
+                       {
+                               System.Text.StringBuilder b = new System.Text.StringBuilder();
+                               int numSegments = segments.Count;
+                               for (int i = 0; i < numSegments; i++)
+                               {
+                                       if (i > 0)
+                                               b.Append(' ');
+                                       b.Append(segments.Info(i).SegString(dir));
+                               }
+                               if (info != null)
+                                       b.Append(" into ").Append(info.name);
+                               if (optimize)
+                                       b.Append(" [optimize]");
+                               if (mergeDocStores)
+                               {
+                                       b.Append(" [mergeDocStores]");
+                               }
+                               return b.ToString();
+                       }
+
+            public SegmentInfos segments_ForNUnit
+            {
+                get { return segments; }
+            }
+               }
+               
+               /// <summary> A MergeSpecification instance provides the information
+               /// necessary to perform multiple merges.  It simply
+               /// contains a list of {@link OneMerge} instances.
+               /// </summary>
+               
+               public class MergeSpecification
+               {
+                       
+                       /// <summary> The list of primitive {@link OneMerge} operations to perform.</summary>
+                       
+                       public System.Collections.IList merges = new System.Collections.ArrayList();
+                       
+                       public virtual void  Add(OneMerge merge)
+                       {
+                               merges.Add(merge);
+                       }
+                       
+                       public virtual System.String SegString(Directory dir)
+                       {
+                               System.Text.StringBuilder b = new System.Text.StringBuilder();
+                               b.Append("MergeSpec:\n");
+                               int count = merges.Count;
+                               for (int i = 0; i < count; i++)
+                                       b.Append("  ").Append(1 + i).Append(": ").Append(((OneMerge) merges[i]).SegString(dir));
+                               return b.ToString();
+                       }
+               }
+               
+               /// <summary>Exception thrown if there are any problems while
+               /// executing a merge. 
+               /// </summary>
+               [Serializable]
+               public class MergeException:System.SystemException
+               {
+                       private Directory dir;
+                       /// <deprecated>
+                       /// Use {@link #MergePolicy.MergeException(String,Directory)} instead 
+                       /// </deprecated>
+            [Obsolete("Use MergePolicy.MergeException(String,Directory) instead ")]
+                       public MergeException(System.String message):base(message)
+                       {
+                       }
+                       public MergeException(System.String message, Directory dir):base(message)
+                       {
+                               this.dir = dir;
+                       }
+                       /// <deprecated>
+                       /// Use {@link #MergePolicy.MergeException(Throwable,Directory)} instead 
+                       /// </deprecated>
+            [Obsolete("Use MergePolicy.MergeException(Throwable,Directory) instead ")]
+                       public MergeException(System.Exception exc):base(null, exc)
+                       {
+                       }
+                       public MergeException(System.Exception exc, Directory dir):base(null, exc)
+                       {
+                               this.dir = dir;
+                       }
+                       /// <summary>Returns the {@link Directory} of the index that hit
+                       /// the exception. 
+                       /// </summary>
+                       public virtual Directory GetDirectory()
+                       {
+                               return dir;
+                       }
+               }
+               
+               [Serializable]
+               public class MergeAbortedException:System.IO.IOException
+               {
+                       public MergeAbortedException():base("merge is aborted")
+                       {
+                       }
+                       public MergeAbortedException(System.String message):base(message)
+                       {
+                       }
+               }
+               
+               protected internal IndexWriter writer;
+               
+               public MergePolicy(IndexWriter writer)
+               {
+                       this.writer = writer;
+               }
+               
+               /// <summary> Determine what set of merge operations are now necessary on the index.
+               /// {@link IndexWriter} calls this whenever there is a change to the segments.
+               /// This call is always synchronized on the {@link IndexWriter} instance so
+               /// only one thread at a time will call this method.
+               /// 
+               /// </summary>
+               /// <param name="segmentInfos">the total set of segments in the index
+               /// </param>
+               public abstract MergeSpecification FindMerges(SegmentInfos segmentInfos);
+               
+               /// <summary> Determine what set of merge operations is necessary in order to optimize
+               /// the index. {@link IndexWriter} calls this when its
+               /// {@link IndexWriter#Optimize()} method is called. This call is always
+               /// synchronized on the {@link IndexWriter} instance so only one thread at a
+               /// time will call this method.
+               /// 
+               /// </summary>
+               /// <param name="segmentInfos">the total set of segments in the index
+               /// </param>
+               /// <param name="maxSegmentCount">requested maximum number of segments in the index (currently this
+               /// is always 1)
+               /// </param>
+               /// <param name="segmentsToOptimize">contains the specific SegmentInfo instances that must be merged
+               /// away. This may be a subset of all SegmentInfos.
+               /// </param>
+               public abstract MergeSpecification FindMergesForOptimize(SegmentInfos segmentInfos, int maxSegmentCount, System.Collections.Hashtable segmentsToOptimize);
+               
+               /// <summary> Determine what set of merge operations is necessary in order to expunge all
+               /// deletes from the index.
+               /// 
+               /// </summary>
+               /// <param name="segmentInfos">the total set of segments in the index
+               /// </param>
+               public abstract MergeSpecification FindMergesToExpungeDeletes(SegmentInfos segmentInfos);
+               
+               /// <summary> Release all resources for the policy.</summary>
+               public abstract void  Close();
+               
+               /// <summary> Returns true if a newly flushed (not from merge)
+               /// segment should use the compound file format.
+               /// </summary>
+               public abstract bool UseCompoundFile(SegmentInfos segments, SegmentInfo newSegment);
+               
+               /// <summary> Returns true if the doc store files should use the
+               /// compound file format.
+               /// </summary>
+               public abstract bool UseCompoundDocStore(SegmentInfos segments);
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MergeScheduler.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MergeScheduler.cs
new file mode 100644 (file)
index 0000000..d692a56
--- /dev/null
@@ -0,0 +1,47 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary><p/>Expert: {@link IndexWriter} uses an instance
+       /// implementing this interface to execute the merges
+       /// selected by a {@link MergePolicy}.  The default
+       /// MergeScheduler is {@link ConcurrentMergeScheduler}.<p/>
+       /// 
+       /// <p/><b>NOTE:</b> This API is new and still experimental
+       /// (subject to change suddenly in the next release)<p/>
+       /// 
+       /// <p/><b>NOTE</b>: This class typically requires access to
+       /// package-private APIs (e.g., SegmentInfos) to do its job;
+       /// if you implement your own MergeScheduler, you'll need to put
+       /// it in package Mono.Lucene.Net.Index in order to use
+       /// these APIs.
+       /// </summary>
+       
+       public abstract class MergeScheduler
+       {
+               
+               /// <summary>Run the merges provided by {@link IndexWriter#GetNextMerge()}. </summary>
+               public abstract void  Merge(IndexWriter writer);
+               
+               /// <summary>Close this MergeScheduler. </summary>
+               public abstract void  Close();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MultiLevelSkipListReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MultiLevelSkipListReader.cs
new file mode 100644 (file)
index 0000000..fbae301
--- /dev/null
@@ -0,0 +1,319 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using BufferedIndexInput = Mono.Lucene.Net.Store.BufferedIndexInput;
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> This abstract class reads skip lists with multiple levels.
+       /// 
+       /// See {@link MultiLevelSkipListWriter} for information about the encoding
+       /// of the multi-level skip lists.
+       /// 
+       /// Subclasses must implement the abstract method {@link #ReadSkipData(int, IndexInput)}
+       /// which defines the actual format of the skip data.
+       /// </summary>
+       abstract class MultiLevelSkipListReader
+       {
+               // the maximum number of skip levels possible for this index
+               private int maxNumberOfSkipLevels;
+               
+               // number of levels in this skip list
+               private int numberOfSkipLevels;
+               
+               // Expert: defines the number of top skip levels to buffer in memory.
+               // Reducing this number results in less memory usage, but possibly
+               // slower performance due to more random I/Os.
+               // Please notice that the space each level occupies is limited by
+               // the skipInterval. The top level can not contain more than
+               // skipLevel entries, the second top level can not contain more
+               // than skipLevel^2 entries and so forth.
+               private int numberOfLevelsToBuffer = 1;
+               
+               private int docCount;
+               private bool haveSkipped;
+               
+               private IndexInput[] skipStream; // skipStream for each level
+               private long[] skipPointer; // the start pointer of each skip level
+               private int[] skipInterval; // skipInterval of each level
+               private int[] numSkipped; // number of docs skipped per level
+               
+               private int[] skipDoc; // doc id of current skip entry per level 
+               private int lastDoc; // doc id of last read skip entry with docId <= target
+               private long[] childPointer; // child pointer of current skip entry per level
+               private long lastChildPointer; // childPointer of last read skip entry with docId <= target
+               
+               private bool inputIsBuffered;
+               
+               public MultiLevelSkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval)
+               {
+                       this.skipStream = new IndexInput[maxSkipLevels];
+                       this.skipPointer = new long[maxSkipLevels];
+                       this.childPointer = new long[maxSkipLevels];
+                       this.numSkipped = new int[maxSkipLevels];
+                       this.maxNumberOfSkipLevels = maxSkipLevels;
+                       this.skipInterval = new int[maxSkipLevels];
+                       this.skipStream[0] = skipStream;
+                       this.inputIsBuffered = (skipStream is BufferedIndexInput);
+                       this.skipInterval[0] = skipInterval;
+                       for (int i = 1; i < maxSkipLevels; i++)
+                       {
+                               // cache skip intervals
+                               this.skipInterval[i] = this.skipInterval[i - 1] * skipInterval;
+                       }
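+                       // e.g. a base skipInterval of 16 yields per-level skip
+                       // intervals of 16, 256, 4096, ... documents.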
+                       skipDoc = new int[maxSkipLevels];
+               }
+               
+               
+               /// <summary>Returns the id of the doc to which the last call of {@link #SkipTo(int)}
+               /// has skipped.  
+               /// </summary>
+               internal virtual int GetDoc()
+               {
+                       return lastDoc;
+               }
+               
+               
+               /// <summary>Skips entries to the first beyond the current whose document number is
+               /// greater than or equal to <i>target</i>. Returns the current doc count. 
+               /// </summary>
+               internal virtual int SkipTo(int target)
+               {
+                       if (!haveSkipped)
+                       {
+                               // first time, load skip levels
+                               LoadSkipLevels();
+                               haveSkipped = true;
+                       }
+                       
+                       // walk up the levels until highest level is found that has a skip
+                       // for this target
+                       int level = 0;
+                       while (level < numberOfSkipLevels - 1 && target > skipDoc[level + 1])
+                       {
+                               level++;
+                       }
+                       
+                       while (level >= 0)
+                       {
+                               if (target > skipDoc[level])
+                               {
+                                       if (!LoadNextSkip(level))
+                                       {
+                                               continue;
+                                       }
+                               }
+                               else
+                               {
+                                       // no more skips on this level, go down one level
+                                       if (level > 0 && lastChildPointer > skipStream[level - 1].GetFilePointer())
+                                       {
+                                               SeekChild(level - 1);
+                                       }
+                                       level--;
+                               }
+                       }
+                       
+                       return numSkipped[0] - skipInterval[0] - 1;
+               }
+               
+               private bool LoadNextSkip(int level)
+               {
+                       // we have to skip, the target document is greater than the current
+                       // skip list entry        
+                       SetLastSkipData(level);
+                       
+                       numSkipped[level] += skipInterval[level];
+                       
+                       if (numSkipped[level] > docCount)
+                       {
+                               // this skip list is exhausted
+                               skipDoc[level] = System.Int32.MaxValue;
+                               if (numberOfSkipLevels > level)
+                                       numberOfSkipLevels = level;
+                               return false;
+                       }
+                       
+                       // read next skip entry
+                       skipDoc[level] += ReadSkipData(level, skipStream[level]);
+                       
+                       if (level != 0)
+                       {
+                               // read the child pointer if we are not on the leaf level
+                               childPointer[level] = skipStream[level].ReadVLong() + skipPointer[level - 1];
+                       }
+                       
+                       return true;
+               }
+               
+               /// <summary>Seeks the skip entry on the given level </summary>
+               protected internal virtual void  SeekChild(int level)
+               {
+                       skipStream[level].Seek(lastChildPointer);
+                       numSkipped[level] = numSkipped[level + 1] - skipInterval[level + 1];
+                       skipDoc[level] = lastDoc;
+                       if (level > 0)
+                       {
+                               childPointer[level] = skipStream[level].ReadVLong() + skipPointer[level - 1];
+                       }
+               }
+               
+               internal virtual void  Close()
+               {
+                       for (int i = 1; i < skipStream.Length; i++)
+                       {
+                               if (skipStream[i] != null)
+                               {
+                                       skipStream[i].Close();
+                               }
+                       }
+               }
+               
+               /// <summary>initializes the reader </summary>
+               internal virtual void  Init(long skipPointer, int df)
+               {
+                       this.skipPointer[0] = skipPointer;
+                       this.docCount = df;
+            System.Array.Clear(skipDoc, 0, skipDoc.Length);
+                       System.Array.Clear(numSkipped, 0, numSkipped.Length);
+            System.Array.Clear(childPointer, 0, childPointer.Length);
+                       
+                       haveSkipped = false;
+                       for (int i = 1; i < numberOfSkipLevels; i++)
+                       {
+                               skipStream[i] = null;
+                       }
+               }
+               
+               /// <summary>Loads the skip levels  </summary>
+               private void  LoadSkipLevels()
+               {
+                       numberOfSkipLevels = docCount == 0 ? 0 : (int) System.Math.Floor(System.Math.Log(docCount) / System.Math.Log(skipInterval[0]));
+                       if (numberOfSkipLevels > maxNumberOfSkipLevels)
+                       {
+                               numberOfSkipLevels = maxNumberOfSkipLevels;
+                       }
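+                       // e.g. docCount = 10000 with a base skipInterval of 16 gives
+                       // floor(log(10000)/log(16)) = 3 skip levels before clamping.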
+                       
+                       skipStream[0].Seek(skipPointer[0]);
+                       
+                       int toBuffer = numberOfLevelsToBuffer;
+                       
+                       for (int i = numberOfSkipLevels - 1; i > 0; i--)
+                       {
+                               // the length of the current level
+                               long length = skipStream[0].ReadVLong();
+                               
+                               // the start pointer of the current level
+                               skipPointer[i] = skipStream[0].GetFilePointer();
+                               if (toBuffer > 0)
+                               {
+                                       // buffer this level
+                                       skipStream[i] = new SkipBuffer(skipStream[0], (int) length);
+                                       toBuffer--;
+                               }
+                               else
+                               {
+                                       // clone this stream, it is already at the start of the current level
+                                       skipStream[i] = (IndexInput) skipStream[0].Clone();
+                                       if (inputIsBuffered && length < BufferedIndexInput.BUFFER_SIZE)
+                                       {
+                                               ((BufferedIndexInput) skipStream[i]).SetBufferSize((int) length);
+                                       }
+                                       
+                                       // move base stream beyond the current level
+                                       skipStream[0].Seek(skipStream[0].GetFilePointer() + length);
+                               }
+                       }
+                       
+                       // use base stream for the lowest level
+                       skipPointer[0] = skipStream[0].GetFilePointer();
+               }
+               
+               /// <summary> Subclasses must implement the actual skip data encoding in this method.
+               /// 
+               /// </summary>
+               /// <param name="level">the level skip data shall be read from
+               /// </param>
+               /// <param name="skipStream">the skip stream to read from
+               /// </param>
+               protected internal abstract int ReadSkipData(int level, IndexInput skipStream);
+               
+               /// <summary>Copies the values of the last read skip entry on this level </summary>
+               protected internal virtual void  SetLastSkipData(int level)
+               {
+                       lastDoc = skipDoc[level];
+                       lastChildPointer = childPointer[level];
+               }
+               
+               
+               /// <summary>used to buffer the top skip levels </summary>
+               private sealed class SkipBuffer:IndexInput
+               {
+                       private byte[] data;
+                       private long pointer;
+                       private int pos;
+                       
+                       internal SkipBuffer(IndexInput input, int length)
+                       {
+                               data = new byte[length];
+                               pointer = input.GetFilePointer();
+                               input.ReadBytes(data, 0, length);
+                       }
+                       
+                       public override void  Close()
+                       {
+                               data = null;
+                       }
+                       
+                       public override long GetFilePointer()
+                       {
+                               return pointer + pos;
+                       }
+                       
+                       public override long Length()
+                       {
+                               return data.Length;
+                       }
+                       
+                       public override byte ReadByte()
+                       {
+                               return data[pos++];
+                       }
+                       
+                       public override void  ReadBytes(byte[] b, int offset, int len)
+                       {
+                               Array.Copy(data, pos, b, offset, len);
+                               pos += len;
+                       }
+                       
+                       public override void  Seek(long pos)
+                       {
+                               this.pos = (int) (pos - pointer);
+                       }
+                       
+                       override public System.Object Clone()
+                       {
+				System.Diagnostics.Debug.Fail("Port issue:", "Let's see if we need this FilterIndexReader.Clone()"); // {{Aroush-2.9}}
+                               return null;
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MultiLevelSkipListWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MultiLevelSkipListWriter.cs
new file mode 100644 (file)
index 0000000..111a16a
--- /dev/null
@@ -0,0 +1,171 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+using RAMOutputStream = Mono.Lucene.Net.Store.RAMOutputStream;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> This abstract class writes skip lists with multiple levels.
+       /// 
+       /// Example for skipInterval = 3:
+       /// c            (skip level 2)
+       /// c                 c                 c            (skip level 1) 
+       /// x     x     x     x     x     x     x     x     x     x      (skip level 0)
+       /// d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d  (posting list)
+       /// 3     6     9     12    15    18    21    24    27    30     (df)
+       /// 
+       /// d - document
+       /// x - skip data
+       /// c - skip data with child pointer
+       /// 
+       /// Skip level i contains every skipInterval-th entry from skip level i-1.
+	/// Therefore the number of entries on level i is: floor(df / (skipInterval ^ (i + 1))).
+	/// 
+	/// Each skip entry on a level i>0 contains a pointer to the corresponding skip entry in list i-1.
+	/// This guarantees a logarithmic number of skips to find the target document.
+       /// 
+       /// While this class takes care of writing the different skip levels,
+       /// subclasses must define the actual format of the skip data.
+       /// 
+       /// </summary>
+       abstract class MultiLevelSkipListWriter
+       {
+               // number of levels in this skip list
+               private int numberOfSkipLevels;
+               
+               // the skip interval in the list with level = 0
+               private int skipInterval;
+               
+               // for every skip level a different buffer is used 
+               private RAMOutputStream[] skipBuffer;
+               
+               protected internal MultiLevelSkipListWriter(int skipInterval, int maxSkipLevels, int df)
+               {
+                       this.skipInterval = skipInterval;
+                       
+                       // calculate the maximum number of skip levels for this document frequency
+			numberOfSkipLevels = df == 0 ? 0 : (int) System.Math.Floor(System.Math.Log(df) / System.Math.Log(skipInterval));
+                       
+                       // make sure it does not exceed maxSkipLevels
+                       if (numberOfSkipLevels > maxSkipLevels)
+                       {
+                               numberOfSkipLevels = maxSkipLevels;
+                       }
+               }
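+		
+		// Worked example (illustrative only): with skipInterval = 3 and df = 30, as in the
+		// diagram above, numberOfSkipLevels = floor(log(30) / log(3)) = 3, and level i is
+		// expected to hold floor(df / 3^(i+1)) entries:
+		//   level 0: floor(30 / 3)  = 10 entries ('x' in the diagram)
+		//   level 1: floor(30 / 9)  = 3 entries  ('c')
+		//   level 2: floor(30 / 27) = 1 entry    ('c')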
+               
+               protected internal virtual void  Init()
+               {
+                       skipBuffer = new RAMOutputStream[numberOfSkipLevels];
+                       for (int i = 0; i < numberOfSkipLevels; i++)
+                       {
+                               skipBuffer[i] = new RAMOutputStream();
+                       }
+               }
+               
+               protected internal virtual void  ResetSkip()
+               {
+                       // creates new buffers or empties the existing ones
+                       if (skipBuffer == null)
+                       {
+                               Init();
+                       }
+                       else
+                       {
+                               for (int i = 0; i < skipBuffer.Length; i++)
+                               {
+                                       skipBuffer[i].Reset();
+                               }
+                       }
+               }
+               
+               /// <summary> Subclasses must implement the actual skip data encoding in this method.
+               /// 
+               /// </summary>
+		/// <param name="level">the level the skip data shall be written for
+               /// </param>
+               /// <param name="skipBuffer">the skip buffer to write to
+               /// </param>
+               protected internal abstract void  WriteSkipData(int level, IndexOutput skipBuffer);
+               
+               /// <summary> Writes the current skip data to the buffers. The current document frequency determines
+		/// the max level the skip data is to be written to.
+               /// 
+               /// </summary>
+               /// <param name="df">the current document frequency 
+               /// </param>
+               /// <throws>  IOException </throws>
+               internal virtual void  BufferSkip(int df)
+               {
+                       int numLevels;
+                       
+                       // determine max level
+                       for (numLevels = 0; (df % skipInterval) == 0 && numLevels < numberOfSkipLevels; df /= skipInterval)
+                       {
+                               numLevels++;
+                       }
+                       
+                       long childPointer = 0;
+                       
+                       for (int level = 0; level < numLevels; level++)
+                       {
+                               WriteSkipData(level, skipBuffer[level]);
+                               
+                               long newChildPointer = skipBuffer[level].GetFilePointer();
+                               
+                               if (level != 0)
+                               {
+                                       // store child pointers for all levels except the lowest
+                                       skipBuffer[level].WriteVLong(childPointer);
+                               }
+                               
+                               //remember the childPointer for the next level
+                               childPointer = newChildPointer;
+                       }
+               }
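+		
+		// Worked trace (illustrative only): with skipInterval = 3 and numberOfSkipLevels = 3,
+		// BufferSkip is called whenever df is a multiple of 3; numLevels = 1 for df = 3, 6,
+		// 12, 15, 21, 24 (3 divides df exactly once), numLevels = 2 for df = 9 and 18, and
+		// numLevels = 3 for df = 27. Child pointers are appended on every level except 0.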
+               
+               /// <summary> Writes the buffered skip lists to the given output.
+               /// 
+               /// </summary>
+               /// <param name="output">the IndexOutput the skip lists shall be written to 
+               /// </param>
+		/// <returns> the pointer where the skip list starts
+               /// </returns>
+               internal virtual long WriteSkip(IndexOutput output)
+               {
+                       long skipPointer = output.GetFilePointer();
+                       if (skipBuffer == null || skipBuffer.Length == 0)
+                               return skipPointer;
+                       
+                       for (int level = numberOfSkipLevels - 1; level > 0; level--)
+                       {
+                               long length = skipBuffer[level].GetFilePointer();
+                               if (length > 0)
+                               {
+                                       output.WriteVLong(length);
+                                       skipBuffer[level].WriteTo(output);
+                               }
+                       }
+                       skipBuffer[0].WriteTo(output);
+                       
+                       return skipPointer;
+               }
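+		
+		// Resulting on-disk layout (sketch): [vlong len(N-1)][level N-1] ... [vlong len(1)]
+		// [level 1][level 0]. Level 0 is written last and carries no length prefix; the
+		// reader's LoadSkipLevels relies on exactly this ordering.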
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MultiReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MultiReader.cs
new file mode 100644 (file)
index 0000000..4368a32
--- /dev/null
@@ -0,0 +1,506 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Document = Mono.Lucene.Net.Documents.Document;
+using FieldSelector = Mono.Lucene.Net.Documents.FieldSelector;
+using MultiTermDocs = Mono.Lucene.Net.Index.DirectoryReader.MultiTermDocs;
+using MultiTermEnum = Mono.Lucene.Net.Index.DirectoryReader.MultiTermEnum;
+using MultiTermPositions = Mono.Lucene.Net.Index.DirectoryReader.MultiTermPositions;
+using DefaultSimilarity = Mono.Lucene.Net.Search.DefaultSimilarity;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>An IndexReader which reads multiple indexes, appending their content.
+       /// 
+       /// </summary>
+       /// <version>  $Id: MultiReader.java 782406 2009-06-07 16:31:18Z mikemccand $
+       /// </version>
+       public class MultiReader:IndexReader, System.ICloneable
+       {
+               protected internal IndexReader[] subReaders;
+               private int[] starts; // 1st docno for each segment
+               private bool[] decrefOnClose; // remember which subreaders to decRef on close
+               private System.Collections.IDictionary normsCache = new System.Collections.Hashtable();
+               private int maxDoc = 0;
+               private int numDocs = - 1;
+               private bool hasDeletions = false;
+               
+               /// <summary> <p/>Construct a MultiReader aggregating the named set of (sub)readers.
+               /// Directory locking for delete, undeleteAll, and setNorm operations is
+               /// left to the subreaders. <p/>
+	/// <p/>Note that all subreaders are closed if this MultiReader is closed.<p/>
+               /// </summary>
+               /// <param name="subReaders">set of (sub)readers
+               /// </param>
+               /// <throws>  IOException </throws>
+               public MultiReader(IndexReader[] subReaders)
+               {
+                       Initialize(subReaders, true);
+               }
+               
+               /// <summary> <p/>Construct a MultiReader aggregating the named set of (sub)readers.
+               /// Directory locking for delete, undeleteAll, and setNorm operations is
+               /// left to the subreaders. <p/>
+               /// </summary>
+               /// <param name="closeSubReaders">indicates whether the subreaders should be closed
+               /// when this MultiReader is closed
+               /// </param>
+               /// <param name="subReaders">set of (sub)readers
+               /// </param>
+               /// <throws>  IOException </throws>
+               public MultiReader(IndexReader[] subReaders, bool closeSubReaders)
+               {
+                       Initialize(subReaders, closeSubReaders);
+               }
+               
+               private void  Initialize(IndexReader[] subReaders, bool closeSubReaders)
+               {
+                       this.subReaders = new IndexReader[subReaders.Length];
+                       subReaders.CopyTo(this.subReaders, 0);
+                       starts = new int[subReaders.Length + 1]; // build starts array
+                       decrefOnClose = new bool[subReaders.Length];
+                       for (int i = 0; i < subReaders.Length; i++)
+                       {
+                               starts[i] = maxDoc;
+                               maxDoc += subReaders[i].MaxDoc(); // compute maxDocs
+                               
+                               if (!closeSubReaders)
+                               {
+                                       subReaders[i].IncRef();
+                                       decrefOnClose[i] = true;
+                               }
+                               else
+                               {
+                                       decrefOnClose[i] = false;
+                               }
+                               
+                               if (subReaders[i].HasDeletions())
+                                       hasDeletions = true;
+                       }
+                       starts[subReaders.Length] = maxDoc;
+               }
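+		
+		// Worked example (illustrative only): for two subreaders with MaxDoc() 5 and 7,
+		// starts = { 0, 5, 12 } and maxDoc = 12; global doc 8 then belongs to subreader 1
+		// as local doc 8 - starts[1] = 3. This is the mapping that ReaderIndex() and the
+		// dispatch methods below depend on.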
+               
+               /// <summary> Tries to reopen the subreaders.
+               /// <br/>
+		/// If one or more subreaders could be re-opened (i.e. subReader.reopen() 
+               /// returned a new instance != subReader), then a new MultiReader instance 
+               /// is returned, otherwise this instance is returned.
+               /// <p/>
+               /// A re-opened instance might share one or more subreaders with the old 
+               /// instance. Index modification operations result in undefined behavior
+               /// when performed before the old instance is closed.
+               /// (see {@link IndexReader#Reopen()}).
+               /// <p/>
+               /// If subreaders are shared, then the reference count of those
+               /// readers is increased to ensure that the subreaders remain open
+               /// until the last referring reader is closed.
+               /// 
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error  </throws>
+               public override IndexReader Reopen()
+               {
+                       lock (this)
+                       {
+                               return DoReopen(false);
+                       }
+               }
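+		
+		// Hypothetical usage sketch ('oldReader' is an assumed caller-side variable, not
+		// part of this file): Reopen() may return a new instance, so callers typically do
+		//
+		//   IndexReader newReader = oldReader.Reopen();
+		//   if (newReader != oldReader)
+		//       oldReader.Close(); // shared subreaders stay open thanks to IncRef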
+               
+               /// <summary> Clones the subreaders.
+               /// (see {@link IndexReader#clone()}).
+               /// <br/>
+               /// <p/>
+               /// If subreaders are shared, then the reference count of those
+               /// readers is increased to ensure that the subreaders remain open
+               /// until the last referring reader is closed.
+               /// </summary>
+               public override System.Object Clone()
+               {
+                       try
+                       {
+                               return DoReopen(true);
+                       }
+                       catch (System.Exception ex)
+                       {
+                               throw new System.SystemException(ex.Message, ex);
+                       }
+               }
+               
+		/// <summary> If doClone is true then we clone each of the subreaders</summary>
+		/// <param name="doClone">if true, subreaders are cloned rather than reopened
+               /// </param>
+               /// <returns> New IndexReader, or same one (this) if
+               /// reopen/clone is not necessary
+               /// </returns>
+               /// <throws>  CorruptIndexException </throws>
+               /// <throws>  IOException </throws>
+               protected internal virtual IndexReader DoReopen(bool doClone)
+               {
+                       EnsureOpen();
+                       
+                       bool reopened = false;
+                       IndexReader[] newSubReaders = new IndexReader[subReaders.Length];
+                       
+                       bool success = false;
+                       try
+                       {
+                               for (int i = 0; i < subReaders.Length; i++)
+                               {
+                                       if (doClone)
+                                               newSubReaders[i] = (IndexReader) subReaders[i].Clone();
+                                       else
+                                               newSubReaders[i] = subReaders[i].Reopen();
+                                       // if at least one of the subreaders was updated we remember that
+                                       // and return a new MultiReader
+                                       if (newSubReaders[i] != subReaders[i])
+                                       {
+                                               reopened = true;
+                                       }
+                               }
+                               success = true;
+                       }
+                       finally
+                       {
+                               if (!success && reopened)
+                               {
+                                       for (int i = 0; i < newSubReaders.Length; i++)
+                                       {
+                                               if (newSubReaders[i] != subReaders[i])
+                                               {
+                                                       try
+                                                       {
+                                                               newSubReaders[i].Close();
+                                                       }
+							catch (System.IO.IOException)
+                                                       {
+                                                               // keep going - we want to clean up as much as possible
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       if (reopened)
+                       {
+                               bool[] newDecrefOnClose = new bool[subReaders.Length];
+                               for (int i = 0; i < subReaders.Length; i++)
+                               {
+                                       if (newSubReaders[i] == subReaders[i])
+                                       {
+                                               newSubReaders[i].IncRef();
+                                               newDecrefOnClose[i] = true;
+                                       }
+                               }
+                               MultiReader mr = new MultiReader(newSubReaders);
+                               mr.decrefOnClose = newDecrefOnClose;
+                               mr.SetDisableFakeNorms(GetDisableFakeNorms());
+                               return mr;
+                       }
+                       else
+                       {
+                               return this;
+                       }
+               }
+               
+               public override TermFreqVector[] GetTermFreqVectors(int n)
+               {
+                       EnsureOpen();
+                       int i = ReaderIndex(n); // find segment num
+                       return subReaders[i].GetTermFreqVectors(n - starts[i]); // dispatch to segment
+               }
+               
+               public override TermFreqVector GetTermFreqVector(int n, System.String field)
+               {
+                       EnsureOpen();
+                       int i = ReaderIndex(n); // find segment num
+                       return subReaders[i].GetTermFreqVector(n - starts[i], field);
+               }
+               
+               
+               public override void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
+               {
+                       EnsureOpen();
+                       int i = ReaderIndex(docNumber); // find segment num
+                       subReaders[i].GetTermFreqVector(docNumber - starts[i], field, mapper);
+               }
+               
+               public override void  GetTermFreqVector(int docNumber, TermVectorMapper mapper)
+               {
+                       EnsureOpen();
+                       int i = ReaderIndex(docNumber); // find segment num
+                       subReaders[i].GetTermFreqVector(docNumber - starts[i], mapper);
+               }
+               
+               public override bool IsOptimized()
+               {
+                       return false;
+               }
+               
+               public override int NumDocs()
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+			// NOTE: multiple threads may wind up init'ing
+			// numDocs... but that's harmless
+                       if (numDocs == - 1)
+                       {
+                               // check cache
+                               int n = 0; // cache miss--recompute
+                               for (int i = 0; i < subReaders.Length; i++)
+                                       n += subReaders[i].NumDocs(); // sum from readers
+                               numDocs = n;
+                       }
+                       return numDocs;
+               }
+               
+               public override int MaxDoc()
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       return maxDoc;
+               }
+               
+               // inherit javadoc
+               public override Document Document(int n, FieldSelector fieldSelector)
+               {
+                       EnsureOpen();
+                       int i = ReaderIndex(n); // find segment num
+                       return subReaders[i].Document(n - starts[i], fieldSelector); // dispatch to segment reader
+               }
+               
+               public override bool IsDeleted(int n)
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       int i = ReaderIndex(n); // find segment num
+                       return subReaders[i].IsDeleted(n - starts[i]); // dispatch to segment reader
+               }
+               
+               public override bool HasDeletions()
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       return hasDeletions;
+               }
+               
+               protected internal override void  DoDelete(int n)
+               {
+                       numDocs = - 1; // invalidate cache
+                       int i = ReaderIndex(n); // find segment num
+                       subReaders[i].DeleteDocument(n - starts[i]); // dispatch to segment reader
+                       hasDeletions = true;
+               }
+               
+               protected internal override void  DoUndeleteAll()
+               {
+                       for (int i = 0; i < subReaders.Length; i++)
+                               subReaders[i].UndeleteAll();
+                       
+                       hasDeletions = false;
+                       numDocs = - 1; // invalidate cache
+               }
+               
+               private int ReaderIndex(int n)
+               {
+                       // find reader for doc n:
+                       return DirectoryReader.ReaderIndex(n, this.starts, this.subReaders.Length);
+               }
+               
+               public override bool HasNorms(System.String field)
+               {
+                       EnsureOpen();
+                       for (int i = 0; i < subReaders.Length; i++)
+                       {
+                               if (subReaders[i].HasNorms(field))
+                                       return true;
+                       }
+                       return false;
+               }
+               
+               private byte[] ones;
+               private byte[] FakeNorms()
+               {
+                       if (ones == null)
+                               ones = SegmentReader.CreateFakeNorms(MaxDoc());
+                       return ones;
+               }
+               
+               public override byte[] Norms(System.String field)
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               byte[] bytes = (byte[]) normsCache[field];
+                               if (bytes != null)
+                                       return bytes; // cache hit
+                               if (!HasNorms(field))
+				return GetDisableFakeNorms() ? null : FakeNorms();
+                               
+                               bytes = new byte[MaxDoc()];
+                               for (int i = 0; i < subReaders.Length; i++)
+                                       subReaders[i].Norms(field, bytes, starts[i]);
+                               normsCache[field] = bytes; // update cache
+                               return bytes;
+                       }
+               }
+               
+               public override void  Norms(System.String field, byte[] result, int offset)
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               byte[] bytes = (byte[]) normsCache[field];
+				if (bytes == null && !HasNorms(field))
+				{
+					// no norms for this field in any segment: fill the range with the default norm
+					for (int i = offset; i < result.Length; i++)
+					{
+						result[i] = (byte) DefaultSimilarity.EncodeNorm(1.0f);
+					}
+                               }
+                               else if (bytes != null)
+                               {
+                                       // cache hit
+                                       Array.Copy(bytes, 0, result, offset, MaxDoc());
+                               }
+                               else
+                               {
+                                       for (int i = 0; i < subReaders.Length; i++)
+                                       {
+                                               // read from segments
+                                               subReaders[i].Norms(field, result, offset + starts[i]);
+                                       }
+                               }
+                       }
+               }
+               
+               protected internal override void  DoSetNorm(int n, System.String field, byte value_Renamed)
+               {
+                       lock (normsCache.SyncRoot)
+                       {
+                               normsCache.Remove(field); // clear cache
+                       }
+                       int i = ReaderIndex(n); // find segment num
+                       subReaders[i].SetNorm(n - starts[i], field, value_Renamed); // dispatch
+               }
+               
+               public override TermEnum Terms()
+               {
+                       EnsureOpen();
+                       return new MultiTermEnum(this, subReaders, starts, null);
+               }
+               
+               public override TermEnum Terms(Term term)
+               {
+                       EnsureOpen();
+                       return new MultiTermEnum(this, subReaders, starts, term);
+               }
+               
+               public override int DocFreq(Term t)
+               {
+                       EnsureOpen();
+                       int total = 0; // sum freqs in segments
+                       for (int i = 0; i < subReaders.Length; i++)
+                               total += subReaders[i].DocFreq(t);
+                       return total;
+               }
+               
+               public override TermDocs TermDocs()
+               {
+                       EnsureOpen();
+                       return new MultiTermDocs(this, subReaders, starts);
+               }
+               
+               public override TermPositions TermPositions()
+               {
+                       EnsureOpen();
+                       return new MultiTermPositions(this, subReaders, starts);
+               }
+               
+               /// <deprecated> 
+               /// </deprecated>
+		[Obsolete]
+               protected internal override void  DoCommit()
+               {
+                       DoCommit(null);
+               }
+
+		protected internal override void DoCommit(System.Collections.Generic.IDictionary<string, string> commitUserData)
+               {
+                       for (int i = 0; i < subReaders.Length; i++)
+                               subReaders[i].Commit(commitUserData);
+               }
+               
+               protected internal override void  DoClose()
+               {
+                       lock (this)
+                       {
+                               for (int i = 0; i < subReaders.Length; i++)
+                               {
+                                       if (decrefOnClose[i])
+                                       {
+                                               subReaders[i].DecRef();
+                                       }
+                                       else
+                                       {
+                                               subReaders[i].Close();
+                                       }
+                               }
+                       }
+
+			// NOTE: only needed in case someone had asked for
+			// FieldCache for top-level reader (which is generally
+			// not a good idea):
+			Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.Purge(this);
+               }
+
+		public override System.Collections.Generic.ICollection<string> GetFieldNames(IndexReader.FieldOption fieldNames)
+               {
+                       EnsureOpen();
+                       return DirectoryReader.GetFieldNames(fieldNames, this.subReaders);
+               }
+               
+               /// <summary> Checks recursively if all subreaders are up to date. </summary>
+               public override bool IsCurrent()
+               {
+                       for (int i = 0; i < subReaders.Length; i++)
+                       {
+                               if (!subReaders[i].IsCurrent())
+                               {
+                                       return false;
+                               }
+                       }
+                       
+                       // all subreaders are up to date
+                       return true;
+               }
+               
+               /// <summary>Not implemented.</summary>
+               /// <throws>  UnsupportedOperationException </throws>
+               public override long GetVersion()
+               {
+                       throw new System.NotSupportedException("MultiReader does not support this method.");
+               }
+               
+               public override IndexReader[] GetSequentialSubReaders()
+               {
+                       return subReaders;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MultipleTermPositions.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/MultipleTermPositions.cs
new file mode 100644 (file)
index 0000000..f2c09d0
--- /dev/null
@@ -0,0 +1,243 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using PriorityQueue = Mono.Lucene.Net.Util.PriorityQueue;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> Allows you to iterate over the {@link TermPositions} for multiple {@link Term}s as
+       /// a single {@link TermPositions}.
+       /// 
+       /// </summary>
+       public class MultipleTermPositions : TermPositions
+       {
+               
+               private sealed class TermPositionsQueue:PriorityQueue
+               {
+                       internal TermPositionsQueue(System.Collections.IList termPositions)
+                       {
+                               Initialize(termPositions.Count);
+                               
+                               System.Collections.IEnumerator i = termPositions.GetEnumerator();
+                               while (i.MoveNext())
+                               {
+                                       TermPositions tp = (TermPositions) i.Current;
+                                       if (tp.Next())
+                                               Put(tp);
+                               }
+                       }
+                       
+                       internal TermPositions Peek()
+                       {
+                               return (TermPositions) Top();
+                       }
+                       
+                       public override bool LessThan(System.Object a, System.Object b)
+                       {
+                               return ((TermPositions) a).Doc() < ((TermPositions) b).Doc();
+                       }
+               }
+               
+               private sealed class IntQueue
+               {
+                       public IntQueue()
+                       {
+                               InitBlock();
+                       }
+                       private void  InitBlock()
+                       {
+                               _array = new int[_arraySize];
+                       }
+                       private int _arraySize = 16;
+                       private int _index = 0;
+                       private int _lastIndex = 0;
+                       private int[] _array;
+                       
+                       internal void  add(int i)
+                       {
+                               if (_lastIndex == _arraySize)
+                                       growArray();
+                               
+                               _array[_lastIndex++] = i;
+                       }
+                       
+                       internal int next()
+                       {
+                               return _array[_index++];
+                       }
+                       
+                       internal void  sort()
+                       {
+                               System.Array.Sort(_array, _index, _lastIndex - _index);
+                       }
+                       
+                       internal void  clear()
+                       {
+                               _index = 0;
+                               _lastIndex = 0;
+                       }
+                       
+                       internal int size()
+                       {
+                               return (_lastIndex - _index);
+                       }
+                       
+                       private void  growArray()
+                       {
+                               int[] newArray = new int[_arraySize * 2];
+                               Array.Copy(_array, 0, newArray, 0, _arraySize);
+                               _array = newArray;
+                               _arraySize *= 2;
+                       }
+               }
+               
+               private int _doc;
+               private int _freq;
+               private TermPositionsQueue _termPositionsQueue;
+               private IntQueue _posList;
+               
+               /// <summary> Creates a new <code>MultipleTermPositions</code> instance.
+               /// 
+               /// </summary>
+               /// <exception cref="IOException">
+               /// </exception>
+               public MultipleTermPositions(IndexReader indexReader, Term[] terms)
+               {
+                       System.Collections.IList termPositions = new System.Collections.ArrayList();
+                       
+                       for (int i = 0; i < terms.Length; i++)
+                               termPositions.Add(indexReader.TermPositions(terms[i]));
+                       
+                       _termPositionsQueue = new TermPositionsQueue(termPositions);
+                       _posList = new IntQueue();
+               }
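+		
+		// Hypothetical usage sketch ('reader' and the field/terms are assumptions, not
+		// part of this file): merge the positions of several synonymous terms into one
+		// stream:
+		//
+		//   TermPositions tp = new MultipleTermPositions(reader, new Term[] {
+		//       new Term("body", "quick"), new Term("body", "fast") });
+		//   while (tp.Next()) { /* tp.Doc(), tp.Freq(), tp.NextPosition() */ }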
+               
+               public bool Next()
+               {
+                       if (_termPositionsQueue.Size() == 0)
+                               return false;
+                       
+                       _posList.clear();
+                       _doc = _termPositionsQueue.Peek().Doc();
+                       
+                       TermPositions tp;
+                       do 
+                       {
+                               tp = _termPositionsQueue.Peek();
+                               
+                               for (int i = 0; i < tp.Freq(); i++)
+                                       _posList.add(tp.NextPosition());
+                               
+                               if (tp.Next())
+                                       _termPositionsQueue.AdjustTop();
+                               else
+                               {
+                                       _termPositionsQueue.Pop();
+                                       tp.Close();
+                               }
+                       }
+                       while (_termPositionsQueue.Size() > 0 && _termPositionsQueue.Peek().Doc() == _doc);
+                       
+                       _posList.sort();
+                       _freq = _posList.size();
+                       
+                       return true;
+               }
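+		
+		// Worked trace (illustrative only): if "quick" occurs at positions {1, 7} and
+		// "fast" at {4} in the same document, one call to Next() drains both streams for
+		// that doc, leaving _posList = {1, 4, 7} after sort() and _freq = 3.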
+               
+               public int NextPosition()
+               {
+                       return _posList.next();
+               }
+               
+               public bool SkipTo(int target)
+               {
+                       while (_termPositionsQueue.Peek() != null && target > _termPositionsQueue.Peek().Doc())
+                       {
+                               TermPositions tp = (TermPositions) _termPositionsQueue.Pop();
+                               if (tp.SkipTo(target))
+                                       _termPositionsQueue.Put(tp);
+                               else
+                                       tp.Close();
+                       }
+                       return Next();
+               }
+               
+               public int Doc()
+               {
+                       return _doc;
+               }
+               
+               public int Freq()
+               {
+                       return _freq;
+               }
+               
+               public void  Close()
+               {
+                       while (_termPositionsQueue.Size() > 0)
+                               ((TermPositions) _termPositionsQueue.Pop()).Close();
+               }
+               
+               /// <summary> Not implemented.</summary>
+               /// <throws>  UnsupportedOperationException </throws>
+               public virtual void  Seek(Term arg0)
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+               /// <summary> Not implemented.</summary>
+               /// <throws>  UnsupportedOperationException </throws>
+               public virtual void  Seek(TermEnum termEnum)
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+               /// <summary> Not implemented.</summary>
+               /// <throws>  UnsupportedOperationException </throws>
+               public virtual int Read(int[] arg0, int[] arg1)
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+               
+               /// <summary> Not implemented.</summary>
+               /// <throws>  UnsupportedOperationException </throws>
+               public virtual int GetPayloadLength()
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+               /// <summary> Not implemented.</summary>
+               /// <throws>  UnsupportedOperationException </throws>
+               public virtual byte[] GetPayload(byte[] data, int offset)
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+		/// <summary> Payloads are not supported: always returns false. </summary>
+               /// <returns> false
+               /// </returns>
+               // TODO: Remove warning after API has been finalized
+               public virtual bool IsPayloadAvailable()
+               {
+                       return false;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/NormsWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/NormsWriter.cs
new file mode 100644 (file)
index 0000000..ee63df0
--- /dev/null
@@ -0,0 +1,211 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+using Similarity = Mono.Lucene.Net.Search.Similarity;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       // TODO FI: norms could actually be stored as doc store
+       
+	/// <summary>Writes norms.  Each (thread, field) pair accumulates the norms
+       /// for the doc/fields it saw, then the flush method below
+       /// merges all of these together into a single _X.nrm file.
+       /// </summary>
+       
+       sealed class NormsWriter:InvertedDocEndConsumer
+       {
+               
+               private static readonly byte defaultNorm;
+               private FieldInfos fieldInfos;
+               public override InvertedDocEndConsumerPerThread AddThread(DocInverterPerThread docInverterPerThread)
+               {
+                       return new NormsWriterPerThread(docInverterPerThread, this);
+               }
+               
+               public override void  Abort()
+               {
+               }
+               
+               // We only write the _X.nrm file at flush
+               internal void  Files(System.Collections.ICollection files)
+               {
+               }
+               
+               internal override void  SetFieldInfos(FieldInfos fieldInfos)
+               {
+                       this.fieldInfos = fieldInfos;
+               }
+               
+               /// <summary>Produce _X.nrm if any document had a field with norms
+               /// not disabled 
+               /// </summary>
+               public override void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
+               {
+                       
+                       System.Collections.IDictionary byField = new System.Collections.Hashtable();
+                       
+                       // Typically, each thread will have encountered the same
+			// field.  So first we collate by field, i.e., all
+                       // per-thread field instances that correspond to the
+                       // same FieldInfo
+                       System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
+                               
+                               System.Collections.ICollection fields = (System.Collections.ICollection) entry.Value;
+                               System.Collections.IEnumerator fieldsIt = fields.GetEnumerator();
+				System.Collections.ArrayList fieldsToRemove = new System.Collections.ArrayList();
+                               
+                               while (fieldsIt.MoveNext())
+                               {
+                                       NormsWriterPerField perField = (NormsWriterPerField) ((System.Collections.DictionaryEntry) fieldsIt.Current).Key;
+                                       
+                                       if (perField.upto > 0)
+                                       {
+                                               // It has some norms
+                                               System.Collections.IList l = (System.Collections.IList) byField[perField.fieldInfo];
+                                               if (l == null)
+                                               {
+                                                       l = new System.Collections.ArrayList();
+                                                       byField[perField.fieldInfo] = l;
+                                               }
+                                               l.Add(perField);
+                                       }
+                                       // Remove this field since we haven't seen it
+                                       // since the previous flush
+                                       else
+                                       {
+						fieldsToRemove.Add(perField);
+                                       }
+                               }
+
+				System.Collections.Hashtable fieldsHT = (System.Collections.Hashtable)fields;
+				for (int i = 0; i < fieldsToRemove.Count; i++)
+				{
+					fieldsHT.Remove(fieldsToRemove[i]);
+				}
+                       }
+                       
+                       System.String normsFileName = state.segmentName + "." + IndexFileNames.NORMS_EXTENSION;
+                       state.flushedFiles[normsFileName] = normsFileName;
+                       IndexOutput normsOut = state.directory.CreateOutput(normsFileName);
+                       
+                       try
+                       {
+                               normsOut.WriteBytes(SegmentMerger.NORMS_HEADER, 0, SegmentMerger.NORMS_HEADER.Length);
+                               
+                               int numField = fieldInfos.Size();
+                               
+                               int normCount = 0;
+                               
+                               for (int fieldNumber = 0; fieldNumber < numField; fieldNumber++)
+                               {
+                                       
+                                       FieldInfo fieldInfo = fieldInfos.FieldInfo(fieldNumber);
+                                       
+                                       System.Collections.IList toMerge = (System.Collections.IList) byField[fieldInfo];
+                                       int upto = 0;
+                                       if (toMerge != null)
+                                       {
+                                               
+                                               int numFields = toMerge.Count;
+                                               
+                                               normCount++;
+                                               
+                                               NormsWriterPerField[] fields = new NormsWriterPerField[numFields];
+                                               int[] uptos = new int[numFields];
+                                               
+                                               for (int j = 0; j < numFields; j++)
+                                                       fields[j] = (NormsWriterPerField) toMerge[j];
+                                               
+                                               int numLeft = numFields;
+                                               
+                                               while (numLeft > 0)
+                                               {
+                                                       
+							System.Diagnostics.Debug.Assert(uptos[0] < fields[0].docIDs.Length, " uptos[0]=" + uptos[0] + " len=" + fields[0].docIDs.Length);
+                                                       
+                                                       int minLoc = 0;
+                                                       int minDocID = fields[0].docIDs[uptos[0]];
+                                                       
+                                                       for (int j = 1; j < numLeft; j++)
+                                                       {
+                                                               int docID = fields[j].docIDs[uptos[j]];
+                                                               if (docID < minDocID)
+                                                               {
+                                                                       minDocID = docID;
+                                                                       minLoc = j;
+                                                               }
+                                                       }
+                                                       
+                                                       System.Diagnostics.Debug.Assert(minDocID < state.numDocs);
+                                                       
+                                                       // Fill hole
+                                                       for (; upto < minDocID; upto++)
+                                                               normsOut.WriteByte(defaultNorm);
+                                                       
+                                                       normsOut.WriteByte(fields[minLoc].norms[uptos[minLoc]]);
+                                                       (uptos[minLoc])++;
+                                                       upto++;
+                                                       
+                                                       if (uptos[minLoc] == fields[minLoc].upto)
+                                                       {
+                                                               fields[minLoc].Reset();
+                                                               if (minLoc != numLeft - 1)
+                                                               {
+                                                                       fields[minLoc] = fields[numLeft - 1];
+                                                                       uptos[minLoc] = uptos[numLeft - 1];
+                                                               }
+                                                               numLeft--;
+                                                       }
+                                               }
+                                               
+                                               // Fill final hole with defaultNorm
+                                               for (; upto < state.numDocs; upto++)
+                                                       normsOut.WriteByte(defaultNorm);
+                                       }
+                                       else if (fieldInfo.isIndexed && !fieldInfo.omitNorms)
+                                       {
+                                               normCount++;
+                                               // Fill entire field with default norm:
+                                               for (; upto < state.numDocs; upto++)
+                                                       normsOut.WriteByte(defaultNorm);
+                                       }
+                                       
+                                       System.Diagnostics.Debug.Assert(4 + normCount * state.numDocs == normsOut.GetFilePointer(), ".nrm file size mismatch: expected=" + (4 + normCount * state.numDocs) + " actual=" + normsOut.GetFilePointer());
+                               }
+                       }
+                       finally
+                       {
+                               normsOut.Close();
+                       }
+               }
+               
+               internal override void  CloseDocStore(SegmentWriteState state)
+               {
+               }
+               static NormsWriter()
+               {
+                       defaultNorm = Similarity.EncodeNorm(1.0f);
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/NormsWriterPerField.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/NormsWriterPerField.cs
new file mode 100644 (file)
index 0000000..6da8dbe
--- /dev/null
@@ -0,0 +1,90 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using ArrayUtil = Mono.Lucene.Net.Util.ArrayUtil;
+using Similarity = Mono.Lucene.Net.Search.Similarity;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>Taps into DocInverter, as an InvertedDocEndConsumer,
+       /// which is called at the end of inverting each field.  We
+       /// just look at the length for the field (docState.length)
+       /// and record the norm. 
+       /// </summary>
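+       /// <remarks>A minimal sketch of the one-byte norm round-trip this class relies on,
+       /// assuming Similarity.DecodeNorm is the usual counterpart of EncodeNorm:
+       /// <code>
+       /// byte b = Similarity.EncodeNorm(0.5f);  // lossy 8-bit encoding, one byte per document
+       /// float f = Similarity.DecodeNorm(b);    // approximately recovers the original factor
+       /// </code>
+       /// </remarks>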
+       
+       sealed class NormsWriterPerField:InvertedDocEndConsumerPerField, System.IComparable
+       {
+               
+               internal NormsWriterPerThread perThread;
+               internal FieldInfo fieldInfo;
+               internal DocumentsWriter.DocState docState;
+               
+               // Holds all docID/norm pairs we've seen
+               internal int[] docIDs = new int[1];
+               internal byte[] norms = new byte[1];
+               internal int upto;
+               
+               internal FieldInvertState fieldState;
+               
+               public void  Reset()
+               {
+                       // Shrink back if we are overallocated now:
+                       docIDs = ArrayUtil.Shrink(docIDs, upto);
+                       norms = ArrayUtil.Shrink(norms, upto);
+                       upto = 0;
+               }
+               
+               public NormsWriterPerField(DocInverterPerField docInverterPerField, NormsWriterPerThread perThread, FieldInfo fieldInfo)
+               {
+                       this.perThread = perThread;
+                       this.fieldInfo = fieldInfo;
+                       docState = perThread.docState;
+                       fieldState = docInverterPerField.fieldState;
+               }
+               
+               internal override void  Abort()
+               {
+                       upto = 0;
+               }
+               
+               public int CompareTo(System.Object other)
+               {
+                       return String.CompareOrdinal(fieldInfo.name, ((NormsWriterPerField) other).fieldInfo.name);
+               }
+               
+               internal override void  Finish()
+               {
+                       System.Diagnostics.Debug.Assert(docIDs.Length == norms.Length);
+                       if (fieldInfo.isIndexed && !fieldInfo.omitNorms)
+                       {
+                               if (docIDs.Length <= upto)
+                               {
+                                       System.Diagnostics.Debug.Assert(docIDs.Length == upto);
+                                       docIDs = ArrayUtil.Grow(docIDs, 1 + upto);
+                                       norms = ArrayUtil.Grow(norms, 1 + upto);
+                               }
+                               float norm = docState.similarity.ComputeNorm(fieldInfo.name, fieldState);
+                               norms[upto] = Similarity.EncodeNorm(norm);
+                               docIDs[upto] = docState.docID;
+                               upto++;
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/NormsWriterPerThread.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/NormsWriterPerThread.cs
new file mode 100644 (file)
index 0000000..bc48d71
--- /dev/null
@@ -0,0 +1,55 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class NormsWriterPerThread:InvertedDocEndConsumerPerThread
+       {
+               internal NormsWriter normsWriter;
+               internal DocumentsWriter.DocState docState;
+               
+               public NormsWriterPerThread(DocInverterPerThread docInverterPerThread, NormsWriter normsWriter)
+               {
+                       this.normsWriter = normsWriter;
+                       docState = docInverterPerThread.docState;
+               }
+               
+               internal override InvertedDocEndConsumerPerField AddField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo)
+               {
+                       return new NormsWriterPerField(docInverterPerField, this, fieldInfo);
+               }
+               
+               internal override void  Abort()
+               {
+               }
+               
+               internal override void  StartDocument()
+               {
+               }
+               internal override void  FinishDocument()
+               {
+               }
+               
+               internal bool FreeRAM()
+               {
+                       return false;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/Package.html b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/Package.html
new file mode 100644 (file)
index 0000000..6cc3abe
--- /dev/null
@@ -0,0 +1,25 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">\r
+<!--\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements.  See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License.  You may obtain a copy of the License at\r
+\r
+     http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+-->\r
+<html>\r
+<head>\r
+   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">\r
+</head>\r
+<body>\r
+Code to maintain and access indices.\r
+</body>\r
+</html>\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ParallelReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ParallelReader.cs
new file mode 100644 (file)
index 0000000..f2c09c2
--- /dev/null
@@ -0,0 +1,807 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Document = Mono.Lucene.Net.Documents.Document;
+using FieldSelector = Mono.Lucene.Net.Documents.FieldSelector;
+using FieldSelectorResult = Mono.Lucene.Net.Documents.FieldSelectorResult;
+using Fieldable = Mono.Lucene.Net.Documents.Fieldable;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       
+       /// <summary>An IndexReader which reads multiple, parallel indexes.  Each index added
+       /// must have the same number of documents, but typically each contains
+       /// different fields.  Each document contains the union of the fields of all
+       /// documents with the same document number.  When searching, matches for a
+       /// query term are from the first index added that has the field.
+       /// 
+       /// <p/>This is useful, e.g., with collections that have large fields which
+       /// change rarely and small fields that change more frequently.  The smaller
+       /// fields may be re-indexed in a new index and both indexes may be searched
+       /// together.
+       /// 
+       /// <p/><strong>Warning:</strong> It is up to you to make sure all indexes
+       /// are created and modified the same way. For example, if you add
+       /// documents to one index, you need to add the same documents in the
+       /// same order to the other indexes. <em>Failure to do so will result in
+       /// undefined behavior</em>.
+       /// </summary>
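+       /// <example>A minimal usage sketch; the index paths are illustrative and both
+       /// indexes are assumed to number their documents identically:
+       /// <code>
+       /// ParallelReader pr = new ParallelReader();
+       /// pr.Add(IndexReader.Open("/tmp/index-rarely-changing"));
+       /// pr.Add(IndexReader.Open("/tmp/index-frequently-changing"));
+       /// Document doc = pr.Document(0); // union of the fields stored in both indexes
+       /// pr.Close();                    // the default constructor also closes the subreaders
+       /// </code>
+       /// </example>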
+       public class ParallelReader:IndexReader, System.ICloneable
+       {
+               private System.Collections.ArrayList readers = new System.Collections.ArrayList();
+               private System.Collections.IList decrefOnClose = new System.Collections.ArrayList(); // remember which subreaders to decRef on close
+               internal bool incRefReaders = false;
+               private System.Collections.SortedList fieldToReader = new System.Collections.SortedList();
+               private System.Collections.IDictionary readerToFields = new System.Collections.Hashtable();
+               private System.Collections.IList storedFieldReaders = new System.Collections.ArrayList();
+               
+               private int maxDoc;
+               private int numDocs;
+               private bool hasDeletions;
+               
+               /// <summary>Construct a ParallelReader. 
+               /// <p/>Note that all subreaders are closed if this ParallelReader is closed.<p/>
+               /// </summary>
+               public ParallelReader():this(true)
+               {
+               }
+               
+               /// <summary>Construct a ParallelReader. </summary>
+               /// <param name="closeSubReaders">indicates whether the subreaders should be closed
+               /// when this ParallelReader is closed
+               /// </param>
+               public ParallelReader(bool closeSubReaders):base()
+               {
+                       this.incRefReaders = !closeSubReaders;
+               }
+               
+               /// <summary>Add an IndexReader.</summary>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual void  Add(IndexReader reader)
+               {
+                       EnsureOpen();
+                       Add(reader, false);
+               }
+               
+               /// <summary>Add an IndexReader whose stored fields will not be returned.  This can
+               /// accelerate search when stored fields are only needed from a subset of
+               /// the IndexReaders.
+               /// 
+               /// </summary>
+               /// <throws>  IllegalArgumentException if not all indexes contain the same number of documents </throws>
+               /// <throws>  IllegalArgumentException if not all indexes have the same value of {@link IndexReader#MaxDoc()} </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
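+               /// <example>Sketch of the effect of ignoreStoredFields (the reader names are
+               /// illustrative); stored fields are then served only by the first reader:
+               /// <code>
+               /// pr.Add(storedFieldsReader);        // its stored fields are returned
+               /// pr.Add(postingsOnlyReader, true);  // searched, but its stored fields are skipped
+               /// </code>
+               /// </example>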
+               public virtual void  Add(IndexReader reader, bool ignoreStoredFields)
+               {
+                       
+                       EnsureOpen();
+                       if (readers.Count == 0)
+                       {
+                               this.maxDoc = reader.MaxDoc();
+                               this.numDocs = reader.NumDocs();
+                               this.hasDeletions = reader.HasDeletions();
+                       }
+                       
+                       // check compatibility
+                       if (reader.MaxDoc() != maxDoc)
+                               throw new System.ArgumentException("All readers must have same maxDoc: " + maxDoc + "!=" + reader.MaxDoc());
+                       if (reader.NumDocs() != numDocs)
+                               throw new System.ArgumentException("All readers must have same numDocs: " + numDocs + "!=" + reader.NumDocs());
+                       
+                       System.Collections.Generic.ICollection<string> fields = reader.GetFieldNames(IndexReader.FieldOption.ALL);
+                       readerToFields[reader] = fields;
+                       System.Collections.IEnumerator i = fields.GetEnumerator();
+                       while (i.MoveNext())
+                       {
+                               // update fieldToReader map
+                               System.String field = (System.String) i.Current;
+                               if (fieldToReader[field] == null)
+                                       fieldToReader[field] = reader;
+                       }
+                       
+                       if (!ignoreStoredFields)
+                               storedFieldReaders.Add(reader); // add to storedFieldReaders
+                       readers.Add(reader);
+                       
+                       if (incRefReaders)
+                       {
+                               reader.IncRef();
+                       }
+                       decrefOnClose.Add(incRefReaders);
+               }
+               
+               public override System.Object Clone()
+               {
+                       try
+                       {
+                               return DoReopen(true);
+                       }
+                       catch (System.Exception ex)
+                       {
+                               throw new System.SystemException(ex.Message, ex);
+                       }
+               }
+               
+               /// <summary> Tries to reopen the subreaders.
+               /// <br/>
+               /// If one or more subreaders could be re-opened (i.e. subReader.Reopen()
+               /// returned a new instance != subReader), then a new ParallelReader instance 
+               /// is returned, otherwise this instance is returned.
+               /// <p/>
+               /// A re-opened instance might share one or more subreaders with the old 
+               /// instance. Index modification operations result in undefined behavior
+               /// when performed before the old instance is closed.
+               /// (see {@link IndexReader#Reopen()}).
+               /// <p/>
+               /// If subreaders are shared, then the reference count of those
+               /// readers is increased to ensure that the subreaders remain open
+               /// until the last referring reader is closed.
+               /// 
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error  </throws>
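+               /// <example>The usual reopen idiom, sketched on the assumption that the caller
+               /// owns the old instance and may close it once a new one is returned:
+               /// <code>
+               /// IndexReader reopened = reader.Reopen();
+               /// if (reopened != reader)
+               /// {
+               ///     reader.Close(); // only close the old instance when a new one was returned
+               ///     reader = reopened;
+               /// }
+               /// </code>
+               /// </example>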
+               public override IndexReader Reopen()
+               {
+                       lock (this)
+                       {
+                               return DoReopen(false);
+                       }
+               }
+               
+               protected internal virtual IndexReader DoReopen(bool doClone)
+               {
+                       EnsureOpen();
+                       
+                       bool reopened = false;
+                       System.Collections.IList newReaders = new System.Collections.ArrayList();
+                       
+                       bool success = false;
+                       
+                       try
+                       {
+                               for (int i = 0; i < readers.Count; i++)
+                               {
+                                       IndexReader oldReader = (IndexReader) readers[i];
+                                       IndexReader newReader = null;
+                                       if (doClone)
+                                       {
+                                               newReader = (IndexReader) oldReader.Clone();
+                                       }
+                                       else
+                                       {
+                                               newReader = oldReader.Reopen();
+                                       }
+                                       newReaders.Add(newReader);
+                                       // if at least one of the subreaders was updated we remember that
+                                       // and return a new ParallelReader
+                                       if (newReader != oldReader)
+                                       {
+                                               reopened = true;
+                                       }
+                               }
+                               success = true;
+                       }
+                       finally
+                       {
+                               if (!success && reopened)
+                               {
+                                       for (int i = 0; i < newReaders.Count; i++)
+                                       {
+                                               IndexReader r = (IndexReader) newReaders[i];
+                                               if (r != readers[i])
+                                               {
+                                                       try
+                                                       {
+                                                               r.Close();
+                                                       }
+                                                       catch (System.IO.IOException)
+                                                       {
+                                                               // keep going - we want to clean up as much as possible
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       if (reopened)
+                       {
+                               System.Collections.IList newDecrefOnClose = new System.Collections.ArrayList();
+                               ParallelReader pr = new ParallelReader();
+                               for (int i = 0; i < readers.Count; i++)
+                               {
+                                       IndexReader oldReader = (IndexReader) readers[i];
+                                       IndexReader newReader = (IndexReader) newReaders[i];
+                                       if (newReader == oldReader)
+                                       {
+                                               newDecrefOnClose.Add(true);
+                                               newReader.IncRef();
+                                       }
+                                       else
+                                       {
+                                               // this is a new subreader instance, so on close() we don't
+                                               // decRef but close it 
+                                               newDecrefOnClose.Add(false);
+                                       }
+                                       pr.Add(newReader, !storedFieldReaders.Contains(oldReader));
+                               }
+                               pr.decrefOnClose = newDecrefOnClose;
+                               pr.incRefReaders = incRefReaders;
+                               return pr;
+                       }
+                       else
+                       {
+                               // No subreader was refreshed
+                               return this;
+                       }
+               }
+               
+               
+               public override int NumDocs()
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       return numDocs;
+               }
+               
+               public override int MaxDoc()
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       return maxDoc;
+               }
+               
+               public override bool HasDeletions()
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       return hasDeletions;
+               }
+               
+               // check first reader
+               public override bool IsDeleted(int n)
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       if (readers.Count > 0)
+                               return ((IndexReader) readers[0]).IsDeleted(n);
+                       return false;
+               }
+               
+               // delete in all readers
+               protected internal override void  DoDelete(int n)
+               {
+                       for (int i = 0; i < readers.Count; i++)
+                       {
+                               ((IndexReader) readers[i]).DeleteDocument(n);
+                       }
+                       hasDeletions = true;
+               }
+               
+               // undeleteAll in all readers
+               protected internal override void  DoUndeleteAll()
+               {
+                       for (int i = 0; i < readers.Count; i++)
+                       {
+                               ((IndexReader) readers[i]).UndeleteAll();
+                       }
+                       hasDeletions = false;
+               }
+               
+               // append fields from storedFieldReaders
+               public override Document Document(int n, FieldSelector fieldSelector)
+               {
+                       EnsureOpen();
+                       Document result = new Document();
+                       for (int i = 0; i < storedFieldReaders.Count; i++)
+                       {
+                               IndexReader reader = (IndexReader) storedFieldReaders[i];
+                               
+                               bool include = (fieldSelector == null);
+                               if (!include)
+                               {
+                                       System.Collections.IEnumerator it = ((System.Collections.ICollection) readerToFields[reader]).GetEnumerator();
+                                       while (it.MoveNext())
+                                       {
+                                               if (fieldSelector.Accept((System.String) it.Current) != FieldSelectorResult.NO_LOAD)
+                                               {
+                                                       include = true;
+                                                       break;
+                                               }
+                                       }
+                               }
+                               if (include)
+                               {
+                                       System.Collections.IEnumerator fieldIterator = reader.Document(n, fieldSelector).GetFields().GetEnumerator();
+                                       while (fieldIterator.MoveNext())
+                                       {
+                                               result.Add((Fieldable) fieldIterator.Current);
+                                       }
+                               }
+                       }
+                       return result;
+               }
+               
+               // get all vectors
+               public override TermFreqVector[] GetTermFreqVectors(int n)
+               {
+                       EnsureOpen();
+                       System.Collections.ArrayList results = new System.Collections.ArrayList();
+            System.Collections.IEnumerator i = new System.Collections.Hashtable(fieldToReader).GetEnumerator();
+                       while (i.MoveNext())
+                       {
+                               System.Collections.DictionaryEntry e = (System.Collections.DictionaryEntry) i.Current;
+                               System.String field = (System.String) e.Key;
+                               IndexReader reader = (IndexReader) e.Value;
+                               TermFreqVector vector = reader.GetTermFreqVector(n, field);
+                               if (vector != null)
+                                       results.Add(vector);
+                       }
+                       return (TermFreqVector[]) results.ToArray(typeof(TermFreqVector));
+               }
+               
+               public override TermFreqVector GetTermFreqVector(int n, System.String field)
+               {
+                       EnsureOpen();
+                       IndexReader reader = ((IndexReader) fieldToReader[field]);
+                       return reader == null ? null : reader.GetTermFreqVector(n, field);
+               }
+               
+               
+               public override void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
+               {
+                       EnsureOpen();
+                       IndexReader reader = ((IndexReader) fieldToReader[field]);
+                       if (reader != null)
+                       {
+                               reader.GetTermFreqVector(docNumber, field, mapper);
+                       }
+               }
+               
+               public override void  GetTermFreqVector(int docNumber, TermVectorMapper mapper)
+               {
+                       EnsureOpen();
+
+            System.Collections.IEnumerator i = new System.Collections.Hashtable(fieldToReader).GetEnumerator();
+                       while (i.MoveNext())
+                       {
+                               System.Collections.DictionaryEntry e = (System.Collections.DictionaryEntry) i.Current;
+                               System.String field = (System.String) e.Key;
+                               IndexReader reader = (IndexReader) e.Value;
+                               reader.GetTermFreqVector(docNumber, field, mapper);
+                       }
+               }
+               
+               public override bool HasNorms(System.String field)
+               {
+                       EnsureOpen();
+                       IndexReader reader = ((IndexReader) fieldToReader[field]);
+                       return reader == null ? false : reader.HasNorms(field);
+               }
+               
+               public override byte[] Norms(System.String field)
+               {
+                       EnsureOpen();
+                       IndexReader reader = ((IndexReader) fieldToReader[field]);
+                       return reader == null ? null : reader.Norms(field);
+               }
+               
+               public override void  Norms(System.String field, byte[] result, int offset)
+               {
+                       EnsureOpen();
+                       IndexReader reader = ((IndexReader) fieldToReader[field]);
+                       if (reader != null)
+                               reader.Norms(field, result, offset);
+               }
+               
+               protected internal override void  DoSetNorm(int n, System.String field, byte value_Renamed)
+               {
+                       IndexReader reader = ((IndexReader) fieldToReader[field]);
+                       if (reader != null)
+                               reader.DoSetNorm(n, field, value_Renamed);
+               }
+               
+               public override TermEnum Terms()
+               {
+                       EnsureOpen();
+                       return new ParallelTermEnum(this);
+               }
+               
+               public override TermEnum Terms(Term term)
+               {
+                       EnsureOpen();
+                       return new ParallelTermEnum(this, term);
+               }
+               
+               public override int DocFreq(Term term)
+               {
+                       EnsureOpen();
+                       IndexReader reader = ((IndexReader) fieldToReader[term.Field()]);
+                       return reader == null ? 0 : reader.DocFreq(term);
+               }
+               
+               public override TermDocs TermDocs(Term term)
+               {
+                       EnsureOpen();
+                       return new ParallelTermDocs(this, term);
+               }
+               
+               public override TermDocs TermDocs()
+               {
+                       EnsureOpen();
+                       return new ParallelTermDocs(this);
+               }
+               
+               public override TermPositions TermPositions(Term term)
+               {
+                       EnsureOpen();
+                       return new ParallelTermPositions(this, term);
+               }
+               
+               public override TermPositions TermPositions()
+               {
+                       EnsureOpen();
+                       return new ParallelTermPositions(this);
+               }
+               
+               /// <summary> Checks recursively if all subreaders are up to date. </summary>
+               public override bool IsCurrent()
+               {
+                       for (int i = 0; i < readers.Count; i++)
+                       {
+                               if (!((IndexReader) readers[i]).IsCurrent())
+                               {
+                                       return false;
+                               }
+                       }
+                       
+                       // all subreaders are up to date
+                       return true;
+               }
+               
+               /// <summary> Checks recursively if all subindexes are optimized. </summary>
+               public override bool IsOptimized()
+               {
+                       for (int i = 0; i < readers.Count; i++)
+                       {
+                               if (!((IndexReader) readers[i]).IsOptimized())
+                               {
+                                       return false;
+                               }
+                       }
+                       
+                       // all subindexes are optimized
+                       return true;
+               }
+               
+               
+               /// <summary>Not implemented.</summary>
+               /// <throws>  NotSupportedException </throws>
+               public override long GetVersion()
+               {
+                       throw new System.NotSupportedException("ParallelReader does not support this method.");
+               }
+               
+               // for testing
+               public /*internal*/ virtual IndexReader[] GetSubReaders()
+               {
+                       return (IndexReader[]) readers.ToArray(typeof(IndexReader));
+               }
+               
+               /// <deprecated/>
+        [Obsolete]
+               protected internal override void  DoCommit()
+               {
+                       DoCommit(null);
+               }
+
+        protected internal override void DoCommit(System.Collections.Generic.IDictionary<string, string> commitUserData)
+               {
+                       for (int i = 0; i < readers.Count; i++)
+                               ((IndexReader) readers[i]).Commit(commitUserData);
+               }
+               
+               protected internal override void  DoClose()
+               {
+                       lock (this)
+                       {
+                               for (int i = 0; i < readers.Count; i++)
+                               {
+                                       if (((System.Boolean) decrefOnClose[i]))
+                                       {
+                                               ((IndexReader) readers[i]).DecRef();
+                                       }
+                                       else
+                                       {
+                                               ((IndexReader) readers[i]).Close();
+                                       }
+                               }
+                       }
+
+            Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.Purge(this);
+               }
+
+        public override System.Collections.Generic.ICollection<string> GetFieldNames(IndexReader.FieldOption fieldNames)
+               {
+                       EnsureOpen();
+            System.Collections.Generic.List<string> fieldSet = new System.Collections.Generic.List<string>();
+                       for (int i = 0; i < readers.Count; i++)
+                       {
+                               IndexReader reader = ((IndexReader) readers[i]);
+                               System.Collections.Generic.ICollection<string> names = reader.GetFieldNames(fieldNames);
+                fieldSet.AddRange(names);
+                       }
+                       return fieldSet;
+               }
+               
+               private class ParallelTermEnum:TermEnum
+               {
+                       private void  InitBlock(ParallelReader enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private ParallelReader enclosingInstance;
+                       public ParallelReader Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private System.String field;
+                       private System.Collections.IEnumerator fieldIterator;
+                       private TermEnum termEnum;
+                       
+                       public ParallelTermEnum(ParallelReader enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                               try
+                               {
+                                       field = ((System.String) Enclosing_Instance.fieldToReader.GetKey(0));
+                               }
+                               catch (ArgumentOutOfRangeException)
+                               {
+                                       // No fields, so keep field == null, termEnum == null
+                                       return;
+                               }
+                               if (field != null)
+                                       termEnum = ((IndexReader) Enclosing_Instance.fieldToReader[field]).Terms();
+                       }
+                       
+                       public ParallelTermEnum(ParallelReader enclosingInstance, Term term)
+                       {
+                               InitBlock(enclosingInstance);
+                               field = term.Field();
+                               IndexReader reader = ((IndexReader) Enclosing_Instance.fieldToReader[field]);
+                               if (reader != null)
+                                       termEnum = reader.Terms(term);
+                       }
+                       
+                       public override bool Next()
+                       {
+                               if (termEnum == null)
+                                       return false;
+                               
+                               // another term in this field?
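+                               // (field names are interned by Lucene, so the reference comparison below is intentional)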
+                               if (termEnum.Next() && (System.Object) termEnum.Term().Field() == (System.Object) field)
+                                       return true; // yes, keep going
+                               
+                               termEnum.Close(); // close old termEnum
+                               
+                               // find the next field with terms, if any
+                               if (fieldIterator == null)
+                               {
+                    System.Collections.Comparer comparer = System.Collections.Comparer.Default;
+                    System.Collections.SortedList newList = new System.Collections.SortedList();
+                    if (Enclosing_Instance.fieldToReader != null)
+                    {
+                        if (Enclosing_Instance.fieldToReader.Count > 0)
+                        {
+                            int index = 0;
+                            while (comparer.Compare(Enclosing_Instance.fieldToReader.GetKey(index), field) < 0)
+                                index++;
+                            for (; index < Enclosing_Instance.fieldToReader.Count; index++)
+                            {
+                                newList.Add(Enclosing_Instance.fieldToReader.GetKey(index), Enclosing_Instance.fieldToReader[Enclosing_Instance.fieldToReader.GetKey(index)]);
+                            }
+                        }
+                    }
+
+                    fieldIterator = newList.Keys.GetEnumerator();
+                    fieldIterator.MoveNext(); // skip the current field; the loop below resumes at the next one
+                               }
+                               while (fieldIterator.MoveNext())
+                               {
+                                       field = ((System.String) fieldIterator.Current);
+                                       termEnum = ((IndexReader) Enclosing_Instance.fieldToReader[field]).Terms(new Term(field));
+                                       Term term = termEnum.Term();
+                                       if (term != null && (System.Object) term.Field() == (System.Object) field)
+                                               return true;
+                                       else
+                                               termEnum.Close();
+                               }
+                               
+                               return false; // no more fields
+                       }
+                       
+                       public override Term Term()
+                       {
+                               if (termEnum == null)
+                                       return null;
+                               
+                               return termEnum.Term();
+                       }
+                       
+                       public override int DocFreq()
+                       {
+                               if (termEnum == null)
+                                       return 0;
+                               
+                               return termEnum.DocFreq();
+                       }
+                       
+                       public override void  Close()
+                       {
+                               if (termEnum != null)
+                                       termEnum.Close();
+                       }
+               }
+               
+               // wrap a TermDocs in order to support seek(Term)
+               private class ParallelTermDocs : TermDocs
+               {
+                       private void  InitBlock(ParallelReader enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private ParallelReader enclosingInstance;
+                       public ParallelReader Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       protected internal TermDocs termDocs;
+                       
+                       public ParallelTermDocs(ParallelReader enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       public ParallelTermDocs(ParallelReader enclosingInstance, Term term)
+                       {
+                               InitBlock(enclosingInstance);
+                               if (term == null)
+                                       termDocs = (Enclosing_Instance.readers.Count == 0) ? null : ((IndexReader) Enclosing_Instance.readers[0]).TermDocs(null);
+                               else
+                                       Seek(term);
+                       }
+                       
+                       public virtual int Doc()
+                       {
+                               return termDocs.Doc();
+                       }
+                       public virtual int Freq()
+                       {
+                               return termDocs.Freq();
+                       }
+                       
+                       public virtual void  Seek(Term term)
+                       {
+                               IndexReader reader = ((IndexReader) Enclosing_Instance.fieldToReader[term.Field()]);
+                               termDocs = reader != null ? reader.TermDocs(term) : null;
+                       }
+                       
+                       public virtual void  Seek(TermEnum termEnum)
+                       {
+                               Seek(termEnum.Term());
+                       }
+                       
+                       public virtual bool Next()
+                       {
+                               if (termDocs == null)
+                                       return false;
+                               
+                               return termDocs.Next();
+                       }
+                       
+                       public virtual int Read(int[] docs, int[] freqs)
+                       {
+                               if (termDocs == null)
+                                       return 0;
+                               
+                               return termDocs.Read(docs, freqs);
+                       }
+                       
+                       public virtual bool SkipTo(int target)
+                       {
+                               if (termDocs == null)
+                                       return false;
+                               
+                               return termDocs.SkipTo(target);
+                       }
+                       
+                       public virtual void  Close()
+                       {
+                               if (termDocs != null)
+                                       termDocs.Close();
+                       }
+               }
+               
+               private class ParallelTermPositions:ParallelTermDocs, TermPositions
+               {
+                       private void  InitBlock(ParallelReader enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private ParallelReader enclosingInstance;
+                       public new ParallelReader Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       
+                       public ParallelTermPositions(ParallelReader enclosingInstance):base(enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       public ParallelTermPositions(ParallelReader enclosingInstance, Term term):base(enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                               Seek(term);
+                       }
+                       
+                       public override void  Seek(Term term)
+                       {
+                               IndexReader reader = ((IndexReader) Enclosing_Instance.fieldToReader[term.Field()]);
+                               termDocs = reader != null ? reader.TermPositions(term) : null;
+                       }
+                       
+                       public virtual int NextPosition()
+                       {
+                               // It is an error to call this if there is no next position, e.g. if termDocs==null
+                               return ((TermPositions) termDocs).NextPosition();
+                       }
+                       
+                       public virtual int GetPayloadLength()
+                       {
+                               return ((TermPositions) termDocs).GetPayloadLength();
+                       }
+                       
+                       public virtual byte[] GetPayload(byte[] data, int offset)
+                       {
+                               return ((TermPositions) termDocs).GetPayload(data, offset);
+                       }
+                       
+                       
+                       // TODO: Remove warning after API has been finalized
+                       public virtual bool IsPayloadAvailable()
+                       {
+                               return ((TermPositions) termDocs).IsPayloadAvailable();
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/Payload.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/Payload.cs
new file mode 100644 (file)
index 0000000..79a2162
--- /dev/null
@@ -0,0 +1,218 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TokenStream = Mono.Lucene.Net.Analysis.TokenStream;
+using ArrayUtil = Mono.Lucene.Net.Util.ArrayUtil;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>  A Payload is metadata that can be stored together with each occurrence 
+       /// of a term. This metadata is stored inline in the posting list of the
+       /// specific term.  
+       /// <p/>
+       /// To store payloads in the index a {@link TokenStream} has to be used that
+       /// produces payload data.
+       /// <p/>
+       /// Use {@link TermPositions#GetPayloadLength()} and {@link TermPositions#GetPayload(byte[], int)}
+       /// to retrieve the payloads from the index.<br/>
+       /// 
+       /// </summary>
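+       /// <example>A minimal sketch of constructing and reading a payload (the byte
+       /// values are illustrative):
+       /// <code>
+       /// byte[] bytes = new byte[] { 1, 2, 3, 4 };
+       /// Payload payload = new Payload(bytes, 1, 2); // wraps bytes[1..2]; no copy is made
+       /// byte first = payload.ByteAt(0);             // 2
+       /// byte[] copy = payload.ToByteArray();        // { 2, 3 }
+       /// </code>
+       /// </example>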
+       [Serializable]
+       public class Payload : System.ICloneable
+       {
+               /// <summary>the byte array containing the payload data </summary>
+               protected internal byte[] data;
+               
+               /// <summary>the offset within the byte array </summary>
+               protected internal int offset;
+               
+               /// <summary>the length of the payload data </summary>
+               protected internal int length;
+               
+               /// <summary>Creates an empty payload and does not allocate a byte array. </summary>
+               public Payload()
+               {
+                       // nothing to do
+               }
+               
+               /// <summary> Creates a new payload with the given array as data.
+               /// A reference to the passed-in array is held, i.e. no
+               /// copy is made.
+               /// 
+               /// </summary>
+               /// <param name="data">the data of this payload
+               /// </param>
+               public Payload(byte[] data):this(data, 0, data.Length)
+               {
+               }
+               
+               /// <summary> Creates a new payload with the given array as data.
+               /// A reference to the passed-in array is held, i.e. no
+               /// copy is made.
+               /// 
+               /// </summary>
+               /// <param name="data">the data of this payload
+               /// </param>
+               /// <param name="offset">the offset in the data byte array
+               /// </param>
+               /// <param name="length">the length of the data
+               /// </param>
+               public Payload(byte[] data, int offset, int length)
+               {
+                       if (offset < 0 || offset + length > data.Length)
+                       {
+                               throw new System.ArgumentException("offset and length must designate a valid range within data");
+                       }
+                       this.data = data;
+                       this.offset = offset;
+                       this.length = length;
+               }
+               
+               /// <summary> Sets this payload's data.
+               /// A reference to the passed-in array is held, i.e. no
+               /// copy is made.
+               /// </summary>
+               public virtual void  SetData(byte[] data)
+               {
+                       SetData(data, 0, data.Length);
+               }
+               
+               /// <summary> Sets this payload's data.
+               /// A reference to the passed-in array is held, i.e. no
+               /// copy is made.
+               /// </summary>
+               public virtual void  SetData(byte[] data, int offset, int length)
+               {
+                       this.data = data;
+                       this.offset = offset;
+                       this.length = length;
+               }
+               
+               /// <summary> Returns a reference to the underlying byte array
+               /// that holds this payload's data.
+               /// </summary>
+               public virtual byte[] GetData()
+               {
+                       return this.data;
+               }
+               
+               /// <summary> Returns the offset in the underlying byte array </summary>
+               public virtual int GetOffset()
+               {
+                       return this.offset;
+               }
+               
+               /// <summary> Returns the length of the payload data. </summary>
+               public virtual int Length()
+               {
+                       return this.length;
+               }
+               
+               /// <summary> Returns the byte at the given index.</summary>
+               public virtual byte ByteAt(int index)
+               {
+                       if (0 <= index && index < this.length)
+                       {
+                               return this.data[this.offset + index];
+                       }
+                       throw new System.IndexOutOfRangeException("Index out of bounds: " + index);
+               }
+               
+               /// <summary> Allocates a new byte array, copies the payload data into it and returns it. </summary>
+               public virtual byte[] ToByteArray()
+               {
+                       byte[] retArray = new byte[this.length];
+                       Array.Copy(this.data, this.offset, retArray, 0, this.length);
+                       return retArray;
+               }
+               
+               /// <summary> Copies the payload data to a byte array.
+               /// 
+               /// </summary>
+               /// <param name="target">the target byte array
+               /// </param>
+               /// <param name="targetOffset">the offset in the target byte array
+               /// </param>
+               public virtual void  CopyTo(byte[] target, int targetOffset)
+               {
+                       // the copy below requires targetOffset + length <= target.Length
+                       if (this.length > target.Length - targetOffset)
+                       {
+                               throw new System.IndexOutOfRangeException();
+                       }
+                       Array.Copy(this.data, this.offset, target, targetOffset, this.length);
+               }
+               
+               /// <summary> Clones this payload by creating a copy of the underlying
+               /// byte array.
+               /// </summary>
+               public virtual System.Object Clone()
+               {
+                       try
+                       {
+                               // Start with a shallow copy of data
+                               Payload clone = (Payload) base.MemberwiseClone();
+                               // Only copy the part of data that belongs to this Payload
+                               if (offset == 0 && length == data.Length)
+                               {
+                                       // It is the whole thing, so just clone it.
+                                       clone.data = new byte[data.Length];
+                                       data.CopyTo(clone.data, 0);
+                               }
+                               else
+                               {
+                                       // Just get the part
+                                       clone.data = this.ToByteArray();
+                                       clone.offset = 0;
+                               }
+                               return clone;
+                       }
+                       catch (System.Exception e)
+                       {
+                               throw new System.SystemException(e.Message, e); // shouldn't happen
+                       }
+               }
+               
+               public  override bool Equals(System.Object obj)
+               {
+                       if (obj == this)
+                               return true;
+                       if (obj is Payload)
+                       {
+                               Payload other = (Payload) obj;
+                               if (length == other.length)
+                               {
+                                       for (int i = 0; i < length; i++)
+                                               if (data[offset + i] != other.data[other.offset + i])
+                                                       return false;
+                                       return true;
+                               }
+                               else
+                                       return false;
+                       }
+                       else
+                               return false;
+               }
+               
+               public override int GetHashCode()
+               {
+                       return ArrayUtil.HashCode(data, offset, offset + length);
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/PositionBasedTermVectorMapper.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/PositionBasedTermVectorMapper.cs
new file mode 100644 (file)
index 0000000..b1f66a0
--- /dev/null
@@ -0,0 +1,181 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> For each Field, stores position-by-position information.  It ignores frequency information.
+       /// <p/>
+       /// This is not thread-safe.
+       /// </summary>
+       public class PositionBasedTermVectorMapper:TermVectorMapper
+       {
+               private System.Collections.IDictionary fieldToTerms;
+               
+               private System.String currentField;
+               /// <summary> A map from position (Integer) to TVPositionInfo</summary>
+               private System.Collections.IDictionary currentPositions;
+               private bool storeOffsets;
+               
+               /// <summary> Creates a mapper that ignores neither positions nor offsets. </summary>
+               public PositionBasedTermVectorMapper():base(false, false)
+               {
+               }
+               
+               public PositionBasedTermVectorMapper(bool ignoringOffsets):base(false, ignoringOffsets)
+               {
+               }
+               
+               /// <summary> Never ignores positions.  This mapper doesn't make much sense unless there are positions.</summary>
+               /// <returns> false
+               /// </returns>
+               public override bool IsIgnoringPositions()
+               {
+                       return false;
+               }
+               
+               /// <summary> Callback for the TermVectorReader. </summary>
+               /// <param name="term">the term being mapped
+               /// </param>
+               /// <param name="frequency">how often the term occurs in the current field
+               /// </param>
+               /// <param name="offsets">per-position offset information, or null if offsets are not stored
+               /// </param>
+               /// <param name="positions">the positions at which the term occurs
+               /// </param>
+               public override void  Map(System.String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions)
+               {
+                       for (int i = 0; i < positions.Length; i++)
+                       {
+                               System.Int32 posVal = (System.Int32) positions[i];
+                               TVPositionInfo pos = (TVPositionInfo) currentPositions[posVal];
+                               if (pos == null)
+                               {
+                                       pos = new TVPositionInfo(positions[i], storeOffsets);
+                                       currentPositions[posVal] = pos;
+                               }
+                               pos.addTerm(term, offsets != null?offsets[i]:null);
+                       }
+               }
+               
+               /// <summary> Callback mechanism used by the TermVectorReader</summary>
+               /// <param name="field"> The field being read
+               /// </param>
+               /// <param name="numTerms">The number of terms in the vector
+               /// </param>
+               /// <param name="storeOffsets">Whether offsets are available
+               /// </param>
+               /// <param name="storePositions">Whether positions are available
+               /// </param>
+               public override void  SetExpectations(System.String field, int numTerms, bool storeOffsets, bool storePositions)
+               {
+                       if (!storePositions)
+                       {
+                               throw new System.SystemException("You must store positions in order to use this Mapper");
+                       }
+                       // Offsets are optional; whether they are available is recorded below.
+                       fieldToTerms = new System.Collections.Hashtable(numTerms);
+                       this.storeOffsets = storeOffsets;
+                       currentField = field;
+                       currentPositions = new System.Collections.Hashtable();
+                       fieldToTerms[currentField] = currentPositions;
+               }
+               
+               /// <summary> Get the mapping between fields and terms.
+               /// 
+               /// </summary>
+               /// <returns> A map between field names and a sub-Map.  The sub-Map key is the position as an Integer, the value is {@link Mono.Lucene.Net.Index.PositionBasedTermVectorMapper.TVPositionInfo}.
+               /// </returns>
+               public virtual System.Collections.IDictionary GetFieldToTerms()
+               {
+                       return fieldToTerms;
+               }
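+
+               // Illustrative usage sketch (hypothetical caller; assumes the
+               // IndexReader.GetTermFreqVector overload that accepts a TermVectorMapper):
+               //   PositionBasedTermVectorMapper mapper = new PositionBasedTermVectorMapper();
+               //   reader.GetTermFreqVector(docId, "body", mapper);
+               //   System.Collections.IDictionary byField = mapper.GetFieldToTerms();
+               //   // byField["body"] maps boxed position (Int32) -> TVPositionInfo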
+               
+               /// <summary> Container for a term at a position</summary>
+               public class TVPositionInfo
+               {
+                       /// <summary> The position of the term. </summary>
+                       /// <returns> The position of the term
+                       /// </returns>
+                       virtual public int Position
+                       {
+                               get
+                               {
+                                       return position;
+                               }
+                               
+                       }
+                       /// <summary> Note, there may be multiple terms at the same position</summary>
+                       /// <returns> A List of Strings
+                       /// </returns>
+                       virtual public System.Collections.IList Terms
+                       {
+                               get
+                               {
+                                       return terms;
+                               }
+                               
+                       }
+                       /// <summary> Parallel list (to {@link #getTerms()}) of TermVectorOffsetInfo objects.  There may be multiple entries since there may be multiple terms at a position.</summary>
+                       /// <returns> A List of TermVectorOffsetInfo objects, if offsets are stored.
+                       /// </returns>
+                       virtual public System.Collections.IList Offsets
+                       {
+                               get
+                               {
+                                       return offsets;
+                               }
+                               
+                       }
+                       private int position;
+                       //a list of Strings
+                       private System.Collections.IList terms;
+                       //A list of TermVectorOffsetInfo
+                       private System.Collections.IList offsets;
+                       
+                       
+                       public TVPositionInfo(int position, bool storeOffsets)
+                       {
+                               this.position = position;
+                               terms = new System.Collections.ArrayList();
+                               if (storeOffsets)
+                               {
+                                       offsets = new System.Collections.ArrayList();
+                               }
+                       }
+                       
+                       internal virtual void  addTerm(System.String term, TermVectorOffsetInfo info)
+                       {
+                               terms.Add(term);
+                               if (offsets != null)
+                               {
+                                       offsets.Add(info);
+                               }
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/RawPostingList.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/RawPostingList.cs
new file mode 100644 (file)
index 0000000..1deeac2
--- /dev/null
@@ -0,0 +1,46 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       
+       /// <summary>This is the base class for an in-memory posting list,
+       /// keyed by a Token.  {@link TermsHash} maintains a hash
+       /// table holding one instance of this per unique Token.
+       /// Consumers of TermsHash ({@link TermsHashConsumer}) must
+       /// subclass this class with their own concrete class.
+       /// FreqProxTermsWriter.PostingList is a private inner class used 
+       /// for the freq/prox postings, and 
+       /// TermVectorsTermsWriter.PostingList is a private inner class
+       /// used to hold TermVectors postings. 
+       /// </summary>
+       
+       abstract class RawPostingList
+       {
+               internal static readonly int BYTES_SIZE;
+               internal int textStart;
+               internal int intStart;
+               internal int byteStart;
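+               // Rough per-posting memory accounting used by DocumentsWriter: assuming the
+               // port's usual constants (an 8-byte object header and 4-byte ints), BYTES_SIZE
+               // works out to 8 + 3 * 4 = 20 bytes per posting.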
+               static RawPostingList()
+               {
+                       BYTES_SIZE = DocumentsWriter.OBJECT_HEADER_BYTES + 3 * DocumentsWriter.INT_NUM_BYTE;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ReadOnlyDirectoryReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ReadOnlyDirectoryReader.cs
new file mode 100644 (file)
index 0000000..5bd6cb9
--- /dev/null
@@ -0,0 +1,44 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Directory = Mono.Lucene.Net.Store.Directory;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       public class ReadOnlyDirectoryReader:DirectoryReader
+       {
+               internal ReadOnlyDirectoryReader(Directory directory, SegmentInfos sis, IndexDeletionPolicy deletionPolicy, int termInfosIndexDivisor):base(directory, sis, deletionPolicy, true, termInfosIndexDivisor)
+               {
+               }
+               
+               internal ReadOnlyDirectoryReader(Directory directory, SegmentInfos infos, SegmentReader[] oldReaders, int[] oldStarts, System.Collections.IDictionary oldNormsCache, bool doClone, int termInfosIndexDivisor):base(directory, infos, oldReaders, oldStarts, oldNormsCache, true, doClone, termInfosIndexDivisor)
+               {
+               }
+               
+               internal ReadOnlyDirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor):base(writer, infos, termInfosIndexDivisor)
+               {
+               }
+               
+               protected internal override void  AcquireWriteLock()
+               {
+                       ReadOnlySegmentReader.NoWrite();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ReadOnlySegmentReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ReadOnlySegmentReader.cs
new file mode 100644 (file)
index 0000000..1962c4b
--- /dev/null
@@ -0,0 +1,42 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       public class ReadOnlySegmentReader:SegmentReader
+       {
+               
+               internal static void  NoWrite()
+               {
+                       throw new System.NotSupportedException("This IndexReader cannot make any changes to the index (it was opened with readOnly = true)");
+               }
+               
+               protected internal override void  AcquireWriteLock()
+               {
+                       NoWrite();
+               }
+               
+               // Not synchronized
+               public override bool IsDeleted(int n)
+               {
+                       return deletedDocs != null && deletedDocs.Get(n);
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ReusableStringReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/ReusableStringReader.cs
new file mode 100644 (file)
index 0000000..1d61235
--- /dev/null
@@ -0,0 +1,130 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>Used by DocumentsWriter to implement a StringReader
+       /// that can be reset to a new string; we use this when
+       /// tokenizing the string value from a Field. 
+       /// </summary>
+    sealed class ReusableStringReader : System.IO.TextReader
+    {
+        internal int upto;
+        internal int left;
+        internal System.String s;
+        internal void Init(System.String s)
+        {
+            this.s = s;
+            left = s.Length;
+            this.upto = 0;
+        }
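+        // Reuse pattern sketch (hypothetical caller; in practice DocumentsWriter drives this):
+        //   ReusableStringReader r = new ReusableStringReader();
+        //   r.Init("first field value");    // tokenize from r ...
+        //   r.Init("second field value");   // reset and reuse without allocating a new reader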
+        public int Read(char[] c)
+        {
+            return Read(c, 0, c.Length);
+        }
+        public override int Read(System.Char[] c, int off, int len)
+        {
+            if (left > len)
+            {
+                SupportClass.TextSupport.GetCharsFromString(s, upto, upto + len, c, off);
+                upto += len;
+                left -= len;
+                return len;
+            }
+            else if (0 == left)
+            {
+                s = null;
+                return 0;
+            }
+            else
+            {
+                SupportClass.TextSupport.GetCharsFromString(s, upto, upto + left, c, off);
+                int r = left;
+                left = 0;
+                upto = s.Length;
+                return r;
+            }
+        }
+        public override void Close()
+        {
+        }
+
+
+        public override int Read()
+        {
+            if (left > 0)
+            {
+                char ch = s[upto];
+                upto += 1;
+                left -= 1;
+                return (int)ch;
+            }
+            return -1;
+        }
+
+        public override int ReadBlock(char[] buffer, int index, int count)
+        {
+            return Read(buffer, index, count);
+        }
+
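+        // Reads up to the next line terminator, handling "\n", "\r", and "\r\n";
+        // returns null once the underlying string is exhausted.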
+        public override string ReadLine()
+        {
+            int i;
+            for (i = upto; i < s.Length; i++)
+            {
+                char c = s[i];
+                if (c == '\r' || c == '\n')
+                {
+                    string result = s.Substring(upto, i - upto);
+                    upto = i + 1;
+                    left = s.Length - upto;
+                    if (c == '\r' && upto < s.Length && s[upto] == '\n')
+                    {
+                        upto++;
+                        left--;
+                    }
+                    return result;
+                }
+            }
+            if (i > upto)
+            {
+                return ReadToEnd();
+            }
+            return null;
+        }
+
+        public override int Peek()
+        {
+            if (left > 0)
+            {
+                return (int)s[upto];
+            }
+            return -1;
+        }
+
+        public override string ReadToEnd()
+        {
+            string result = s.Substring(upto, left);
+            left = 0;
+            upto = s.Length; // the whole string has been consumed
+            return result;
+        }
+    }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentInfo.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentInfo.cs
new file mode 100644 (file)
index 0000000..b49b78c
--- /dev/null
@@ -0,0 +1,885 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Directory = Mono.Lucene.Net.Store.Directory;
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+using BitVector = Mono.Lucene.Net.Util.BitVector;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> Information about a segment such as its name, directory, and files related
+       /// to the segment.
+       /// 
+       /// <p/><b>NOTE:</b> This API is new and still experimental
+       /// (subject to change suddenly in the next release)<p/>
+       /// </summary>
+       public sealed class SegmentInfo : System.ICloneable
+       {
+               
+               internal const int NO = - 1; // e.g. no norms; no deletes;
+               internal const int YES = 1; // e.g. have norms; have deletes;
+               internal const int CHECK_DIR = 0; // e.g. must check dir to see if there are norms/deletions
+               internal const int WITHOUT_GEN = 0; // a file name that has no GEN in it. 
+               
+               public System.String name; // unique name in dir
+               public int docCount; // number of docs in seg
+               public Directory dir; // where segment resides
+               
+               private bool preLockless; // true if this is a segments file written before
+               // lock-less commits (2.1)
+               
+               private long delGen; // current generation of del file; NO if there
+               // are no deletes; CHECK_DIR if it's a pre-2.1 segment
+               // (and we must check filesystem); YES or higher if
+               // there are deletes at generation N
+               
+               private long[] normGen; // current generation of each field's norm file.
+               // If this array is null, for lockLess this means no 
+               // separate norms.  For preLockLess this means we must 
+               // check filesystem. If this array is not null, its 
+               // values mean: NO says this field has no separate  
+               // norms; CHECK_DIR says it is a preLockLess segment and    
+               // filesystem must be checked; >= YES says this field  
+               // has separate norms with the specified generation
+               
+               private sbyte isCompoundFile; // NO if it is not; YES if it is; CHECK_DIR if it's
+               // pre-2.1 (ie, must check file system to see
+               // if <name>.cfs and <name>.nrm exist)         
+               
+               private bool hasSingleNormFile; // true if this segment maintains norms in a single file; 
+               // false otherwise
+               // this is currently false for segments populated by DocumentWriter
+               // and true for newly created merged segments (both
+               // compound and non compound).
+               
+               private System.Collections.Generic.IList<string> files; // cached list of files that this segment uses
+               // in the Directory
+               
+               internal long sizeInBytes = - 1; // total byte size of all of our files (computed on demand)
+               
+               private int docStoreOffset; // if this segment shares stored fields & vectors, this
+               // offset is where in that file this segment's docs begin
+               private System.String docStoreSegment; // name used to derive fields/vectors file we share with
+               // other segments
+               private bool docStoreIsCompoundFile; // whether doc store files are stored in compound file (*.cfx)
+               
+               private int delCount; // How many deleted docs in this segment, or -1 if not yet known
+               // (if it's an older index)
+               
+               private bool hasProx; // True if this segment has any fields with omitTermFreqAndPositions==false
+
+        private System.Collections.Generic.IDictionary<string, string> diagnostics;
+               
+               public override System.String ToString()
+               {
+                       return "si: " + dir.ToString() + " " + name + " docCount: " + docCount + " delCount: " + delCount + " delFileName: " + GetDelFileName();
+               }
+               
+               public SegmentInfo(System.String name, int docCount, Directory dir)
+               {
+                       this.name = name;
+                       this.docCount = docCount;
+                       this.dir = dir;
+                       delGen = NO;
+                       isCompoundFile = (sbyte) (CHECK_DIR);
+                       preLockless = true;
+                       hasSingleNormFile = false;
+                       docStoreOffset = - 1;
+                       docStoreSegment = name;
+                       docStoreIsCompoundFile = false;
+                       delCount = 0;
+                       hasProx = true;
+               }
+               
+               public SegmentInfo(System.String name, int docCount, Directory dir, bool isCompoundFile, bool hasSingleNormFile):this(name, docCount, dir, isCompoundFile, hasSingleNormFile, - 1, null, false, true)
+               {
+               }
+               
+               public SegmentInfo(System.String name, int docCount, Directory dir, bool isCompoundFile, bool hasSingleNormFile, int docStoreOffset, System.String docStoreSegment, bool docStoreIsCompoundFile, bool hasProx):this(name, docCount, dir)
+               {
+                       this.isCompoundFile = (sbyte) (isCompoundFile?YES:NO);
+                       this.hasSingleNormFile = hasSingleNormFile;
+                       preLockless = false;
+                       this.docStoreOffset = docStoreOffset;
+                       this.docStoreSegment = docStoreSegment;
+                       this.docStoreIsCompoundFile = docStoreIsCompoundFile;
+                       this.hasProx = hasProx;
+                       delCount = 0;
+                       System.Diagnostics.Debug.Assert(docStoreOffset == - 1 || docStoreSegment != null, "dso=" + docStoreOffset + " dss=" + docStoreSegment + " docCount=" + docCount);
+               }
+               
+               /// <summary> Copy everything from src SegmentInfo into our instance.</summary>
+               internal void  Reset(SegmentInfo src)
+               {
+                       ClearFiles();
+                       name = src.name;
+                       docCount = src.docCount;
+                       dir = src.dir;
+                       preLockless = src.preLockless;
+                       delGen = src.delGen;
+                       docStoreOffset = src.docStoreOffset;
+                       docStoreIsCompoundFile = src.docStoreIsCompoundFile;
+                       if (src.normGen == null)
+                       {
+                               normGen = null;
+                       }
+                       else
+                       {
+                               normGen = new long[src.normGen.Length];
+                               Array.Copy(src.normGen, 0, normGen, 0, src.normGen.Length);
+                       }
+                       isCompoundFile = src.isCompoundFile;
+                       hasSingleNormFile = src.hasSingleNormFile;
+                       delCount = src.delCount;
+               }
+               
+               // must be Map<String, String>
+        internal void SetDiagnostics(System.Collections.Generic.IDictionary<string, string> diagnostics)
+               {
+                       this.diagnostics = diagnostics;
+               }
+               
+               // returns Map<String, String>
+        public System.Collections.Generic.IDictionary<string, string> GetDiagnostics()
+               {
+                       return diagnostics;
+               }
+               
+               /// <summary> Construct a new SegmentInfo instance by reading a
+               /// previously saved SegmentInfo from input.
+               /// 
+               /// </summary>
+               /// <param name="dir">directory to load from
+               /// </param>
+               /// <param name="format">format of the segments info file
+               /// </param>
+               /// <param name="input">input handle to read segment info from
+               /// </param>
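+               /// <remarks> Segments-file format constants are negative and decrease as the
+               /// format evolves, so <c>format &lt;= FORMAT_X</c> below means "written by the
+               /// version that introduced FORMAT_X, or a later one".
+               /// </remarks>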
+               internal SegmentInfo(Directory dir, int format, IndexInput input)
+               {
+                       this.dir = dir;
+                       name = input.ReadString();
+                       docCount = input.ReadInt();
+                       if (format <= SegmentInfos.FORMAT_LOCKLESS)
+                       {
+                               delGen = input.ReadLong();
+                               if (format <= SegmentInfos.FORMAT_SHARED_DOC_STORE)
+                               {
+                                       docStoreOffset = input.ReadInt();
+                                       if (docStoreOffset != - 1)
+                                       {
+                                               docStoreSegment = input.ReadString();
+                                               docStoreIsCompoundFile = (1 == input.ReadByte());
+                                       }
+                                       else
+                                       {
+                                               docStoreSegment = name;
+                                               docStoreIsCompoundFile = false;
+                                       }
+                               }
+                               else
+                               {
+                                       docStoreOffset = - 1;
+                                       docStoreSegment = name;
+                                       docStoreIsCompoundFile = false;
+                               }
+                               if (format <= SegmentInfos.FORMAT_SINGLE_NORM_FILE)
+                               {
+                                       hasSingleNormFile = (1 == input.ReadByte());
+                               }
+                               else
+                               {
+                                       hasSingleNormFile = false;
+                               }
+                               int numNormGen = input.ReadInt();
+                               if (numNormGen == NO)
+                               {
+                                       normGen = null;
+                               }
+                               else
+                               {
+                                       normGen = new long[numNormGen];
+                                       for (int j = 0; j < numNormGen; j++)
+                                       {
+                                               normGen[j] = input.ReadLong();
+                                       }
+                               }
+                               isCompoundFile = (sbyte) input.ReadByte();
+                               preLockless = (isCompoundFile == CHECK_DIR);
+                               if (format <= SegmentInfos.FORMAT_DEL_COUNT)
+                               {
+                                       delCount = input.ReadInt();
+                                       System.Diagnostics.Debug.Assert(delCount <= docCount);
+                               }
+                               else
+                                       delCount = - 1;
+                               if (format <= SegmentInfos.FORMAT_HAS_PROX)
+                                       hasProx = input.ReadByte() == 1;
+                               else
+                                       hasProx = true;
+                               
+                               if (format <= SegmentInfos.FORMAT_DIAGNOSTICS)
+                               {
+                                       diagnostics = input.ReadStringStringMap();
+                               }
+                               else
+                               {
+                                       diagnostics = new System.Collections.Generic.Dictionary<string,string>();
+                               }
+                       }
+                       else
+                       {
+                               delGen = CHECK_DIR;
+                               normGen = null;
+                               isCompoundFile = (sbyte) (CHECK_DIR);
+                               preLockless = true;
+                               hasSingleNormFile = false;
+                               docStoreOffset = - 1;
+                               docStoreIsCompoundFile = false;
+                               docStoreSegment = null;
+                               delCount = - 1;
+                               hasProx = true;
+                               diagnostics = new System.Collections.Generic.Dictionary<string,string>();
+                       }
+               }
+               
+               internal void  SetNumFields(int numFields)
+               {
+                       if (normGen == null)
+                       {
+                               // normGen is null if we loaded a pre-2.1 segment
+                               // file, or, if this segments file hasn't had any
+                               // norms set against it yet:
+                               normGen = new long[numFields];
+                               
+                               if (preLockless)
+                               {
+                                       // Do nothing: thus leaving normGen[k]==CHECK_DIR (==0), so that later we know  
+                                       // we have to check filesystem for norm files, because this is prelockless.
+                               }
+                               else
+                               {
+                                       // This is a FORMAT_LOCKLESS segment, which means
+                                       // there are no separate norms:
+                                       for (int i = 0; i < numFields; i++)
+                                       {
+                                               normGen[i] = NO;
+                                       }
+                               }
+                       }
+               }
+               
+               /// <summary>Returns total size in bytes of all of files used by
+               /// this segment. 
+               /// </summary>
+               public long SizeInBytes()
+               {
+                       if (sizeInBytes == - 1)
+                       {
+                               System.Collections.Generic.IList<string> files = Files();
+                               int size = files.Count;
+                               sizeInBytes = 0;
+                               for (int i = 0; i < size; i++)
+                               {
+                                       System.String fileName = (System.String) files[i];
+                                       // We don't count bytes used by a shared doc store
+                                       // against this segment:
+                                       if (docStoreOffset == - 1 || !IndexFileNames.IsDocStoreFile(fileName))
+                                               sizeInBytes += dir.FileLength(fileName);
+                               }
+                       }
+                       return sizeInBytes;
+               }
+               
+               public bool HasDeletions()
+               {
+                       // Cases:
+                       //
+                       //   delGen == NO: this means this segment was written
+                       //     by the LOCKLESS code and for certain does not have
+                       //     deletions yet
+                       //
+                       //   delGen == CHECK_DIR: this means this segment was written by
+                       //     pre-LOCKLESS code which means we must check
+                       //     directory to see if .del file exists
+                       //
+                       //   delGen >= YES: this means this segment was written by
+                       //     the LOCKLESS code and for certain has
+                       //     deletions
+                       //
+                       if (delGen == NO)
+                       {
+                               return false;
+                       }
+                       else if (delGen >= YES)
+                       {
+                               return true;
+                       }
+                       else
+                       {
+                               return dir.FileExists(GetDelFileName());
+                       }
+               }
+               
+               internal void  AdvanceDelGen()
+               {
+                       // delGen 0 is reserved for pre-LOCKLESS format
+                       if (delGen == NO)
+                       {
+                               delGen = YES;
+                       }
+                       else
+                       {
+                               delGen++;
+                       }
+                       ClearFiles();
+               }
+               
+               internal void  ClearDelGen()
+               {
+                       delGen = NO;
+                       ClearFiles();
+               }
+               
+               public System.Object Clone()
+               {
+                       SegmentInfo si = new SegmentInfo(name, docCount, dir);
+                       si.isCompoundFile = isCompoundFile;
+                       si.delGen = delGen;
+                       si.delCount = delCount;
+                       si.hasProx = hasProx;
+                       si.preLockless = preLockless;
+                       si.hasSingleNormFile = hasSingleNormFile;
+            if (this.diagnostics != null)
+            {
+                si.diagnostics = new System.Collections.Generic.Dictionary<string, string>();
+                foreach (string o in diagnostics.Keys)
+                {
+                    si.diagnostics.Add(o,diagnostics[o]);
+                }
+            }
+                       if (normGen != null)
+                       {
+                               si.normGen = new long[normGen.Length];
+                               normGen.CopyTo(si.normGen, 0);
+                       }
+                       si.docStoreOffset = docStoreOffset;
+                       si.docStoreSegment = docStoreSegment;
+                       si.docStoreIsCompoundFile = docStoreIsCompoundFile;
+            if (this.files != null)
+            {
+                si.files = new System.Collections.Generic.List<string>();
+                foreach (string file in files)
+                {
+                    si.files.Add(file);
+                }
+            }
+            
+                       return si;
+               }
+               
+               public System.String GetDelFileName()
+               {
+                       if (delGen == NO)
+                       {
+                               // In this case we know there is no deletion filename
+                               // against this segment
+                               return null;
+                       }
+                       else
+                       {
+                               // If delGen is CHECK_DIR, it's the pre-lockless-commit file format
+                               return IndexFileNames.FileNameFromGeneration(name, "." + IndexFileNames.DELETES_EXTENSION, delGen);
+                       }
+               }
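+
+               // For example, a segment named "_1" with delGen 3 would typically resolve to
+               // "_1_3.del" via FileNameFromGeneration (the exact rendering of the generation
+               // is an implementation detail of IndexFileNames).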
+
+        /// <summary> Returns true if this field for this segment has saved a separate norms file (_&lt;segment&gt;_N.sX).
+               /// 
+               /// </summary>
+               /// <param name="fieldNumber">the field index to check
+               /// </param>
+               public bool HasSeparateNorms(int fieldNumber)
+               {
+                       if ((normGen == null && preLockless) || (normGen != null && normGen[fieldNumber] == CHECK_DIR))
+                       {
+                               // Must fallback to directory file exists check:
+                               System.String fileName = name + ".s" + fieldNumber;
+                               return dir.FileExists(fileName);
+                       }
+                       else if (normGen == null || normGen[fieldNumber] == NO)
+                       {
+                               return false;
+                       }
+                       else
+                       {
+                               return true;
+                       }
+               }
+               
+               /// <summary> Returns true if any fields in this segment have separate norms.</summary>
+               public bool HasSeparateNorms()
+               {
+                       if (normGen == null)
+                       {
+                               if (!preLockless)
+                               {
+                                       // This means we were created w/ LOCKLESS code and no
+                                       // norms are written yet:
+                                       return false;
+                               }
+                               else
+                               {
+                                       // This means this segment was saved with pre-LOCKLESS
+                                       // code.  So we must fallback to the original
+                                       // directory list check:
+                                       System.String[] result = dir.List();
+                                       if (result == null)
+                                       {
+                                               throw new System.IO.IOException("cannot read directory " + dir + ": list() returned null");
+                                       }
+                                       
+                                       System.String pattern;
+                                       pattern = name + ".s";
+                                       int patternLength = pattern.Length;
+                                       for (int i = 0; i < result.Length; i++)
+                                       {
+                                               if (result[i].StartsWith(pattern) && System.Char.IsDigit(result[i][patternLength]))
+                                                       return true;
+                                       }
+                                       return false;
+                               }
+                       }
+                       else
+                       {
+                               // This means this segment was saved with LOCKLESS
+                               // code so we first check whether any normGen's are >= 1
+                               // (meaning they definitely have separate norms):
+                               for (int i = 0; i < normGen.Length; i++)
+                               {
+                                       if (normGen[i] >= YES)
+                                       {
+                                               return true;
+                                       }
+                               }
+                               // Next we look for any == 0.  These cases were
+                               // pre-LOCKLESS and must be checked in directory:
+                               for (int i = 0; i < normGen.Length; i++)
+                               {
+                                       if (normGen[i] == CHECK_DIR)
+                                       {
+                                               if (HasSeparateNorms(i))
+                                               {
+                                                       return true;
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       return false;
+               }
+               
+               /// <summary> Increment the generation count for the norms file for
+               /// this field.
+               /// 
+               /// </summary>
+               /// <param name="fieldIndex">field whose norm file will be rewritten
+               /// </param>
+               internal void  AdvanceNormGen(int fieldIndex)
+               {
+                       if (normGen[fieldIndex] == NO)
+                       {
+                               normGen[fieldIndex] = YES;
+                       }
+                       else
+                       {
+                               normGen[fieldIndex]++;
+                       }
+                       ClearFiles();
+               }
+               
+               /// <summary> Get the file name for the norms file for this field.
+               /// 
+               /// </summary>
+               /// <param name="number">field index
+               /// </param>
+               public System.String GetNormFileName(int number)
+               {
+                       System.String prefix;
+                       
+                       long gen;
+                       if (normGen == null)
+                       {
+                               gen = CHECK_DIR;
+                       }
+                       else
+                       {
+                               gen = normGen[number];
+                       }
+                       
+                       if (HasSeparateNorms(number))
+                       {
+                               // case 1: separate norm
+                               prefix = ".s";
+                               return IndexFileNames.FileNameFromGeneration(name, prefix + number, gen);
+                       }
+                       
+                       if (hasSingleNormFile)
+                       {
+                               // case 2: lockless (or nrm file exists) - single file for all norms 
+                               prefix = "." + IndexFileNames.NORMS_EXTENSION;
+                               return IndexFileNames.FileNameFromGeneration(name, prefix, WITHOUT_GEN);
+                       }
+                       
+                       // case 3: norm file for each field
+                       prefix = ".f";
+                       return IndexFileNames.FileNameFromGeneration(name, prefix + number, WITHOUT_GEN);
+               }
+               
+               /// <summary> Mark whether this segment is stored as a compound file.
+               /// 
+               /// </summary>
+               /// <param name="isCompoundFile">true if this is a compound file;
+               /// else, false
+               /// </param>
+               internal void  SetUseCompoundFile(bool isCompoundFile)
+               {
+                       if (isCompoundFile)
+                       {
+                               this.isCompoundFile = (sbyte) (YES);
+                       }
+                       else
+                       {
+                               this.isCompoundFile = (sbyte) (NO);
+                       }
+                       ClearFiles();
+               }
+               
+               /// <summary> Returns true if this segment is stored as a compound
+               /// file; else, false.
+               /// </summary>
+               public bool GetUseCompoundFile()
+               {
+                       if (isCompoundFile == NO)
+                       {
+                               return false;
+                       }
+                       else if (isCompoundFile == YES)
+                       {
+                               return true;
+                       }
+                       else
+                       {
+                               return dir.FileExists(name + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
+                       }
+               }
+               
+               public int GetDelCount()
+               {
+                       if (delCount == - 1)
+                       {
+                               if (HasDeletions())
+                               {
+                                       System.String delFileName = GetDelFileName();
+                                       delCount = new BitVector(dir, delFileName).Count();
+                               }
+                               else
+                                       delCount = 0;
+                       }
+                       System.Diagnostics.Debug.Assert(delCount <= docCount);
+                       return delCount;
+               }
+               
+               internal void  SetDelCount(int delCount)
+               {
+                       this.delCount = delCount;
+                       System.Diagnostics.Debug.Assert(delCount <= docCount);
+               }
+               
+               public int GetDocStoreOffset()
+               {
+                       return docStoreOffset;
+               }
+               
+               public bool GetDocStoreIsCompoundFile()
+               {
+                       return docStoreIsCompoundFile;
+               }
+               
+               internal void  SetDocStoreIsCompoundFile(bool v)
+               {
+                       docStoreIsCompoundFile = v;
+                       ClearFiles();
+               }
+               
+               public System.String GetDocStoreSegment()
+               {
+                       return docStoreSegment;
+               }
+               
+               internal void  SetDocStoreOffset(int offset)
+               {
+                       docStoreOffset = offset;
+                       ClearFiles();
+               }
+               
+               internal void  SetDocStore(int offset, System.String segment, bool isCompoundFile)
+               {
+                       docStoreOffset = offset;
+                       docStoreSegment = segment;
+                       docStoreIsCompoundFile = isCompoundFile;
+               }
+               
+               /// <summary> Save this segment's info.</summary>
+               internal void  Write(IndexOutput output)
+               {
+                       output.WriteString(name);
+                       output.WriteInt(docCount);
+                       output.WriteLong(delGen);
+                       output.WriteInt(docStoreOffset);
+                       if (docStoreOffset != - 1)
+                       {
+                               output.WriteString(docStoreSegment);
+                               output.WriteByte((byte) (docStoreIsCompoundFile?1:0));
+                       }
+                       
+                       output.WriteByte((byte) (hasSingleNormFile?1:0));
+                       if (normGen == null)
+                       {
+                               output.WriteInt(NO);
+                       }
+                       else
+                       {
+                               output.WriteInt(normGen.Length);
+                               for (int j = 0; j < normGen.Length; j++)
+                               {
+                                       output.WriteLong(normGen[j]);
+                               }
+                       }
+                       output.WriteByte((byte) isCompoundFile);
+                       output.WriteInt(delCount);
+                       output.WriteByte((byte) (hasProx?1:0));
+                       output.WriteStringStringMap(diagnostics);
+               }
+               
+               internal void  SetHasProx(bool hasProx)
+               {
+                       this.hasProx = hasProx;
+                       ClearFiles();
+               }
+               
+               public bool GetHasProx()
+               {
+                       return hasProx;
+               }
+               
+               private void  AddIfExists(System.Collections.Generic.IList<string> files, System.String fileName)
+               {
+                       if (dir.FileExists(fileName))
+                               files.Add(fileName);
+               }
+               
+               /*
+               * Return all files referenced by this SegmentInfo.  The
+               * returned List is cached locally, so you should not
+               * modify it.
+               */
+               
+               public System.Collections.Generic.IList<string> Files()
+               {
+                       
+                       if (files != null)
+                       {
+                               // Already cached:
+                               return files;
+                       }
+
+            System.Collections.Generic.List<string> fileList = new System.Collections.Generic.List<string>();
+                       
+                       bool useCompoundFile = GetUseCompoundFile();
+                       
+                       if (useCompoundFile)
+                       {
+                fileList.Add(name + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
+                       }
+                       else
+                       {
+                               System.String[] exts = IndexFileNames.NON_STORE_INDEX_EXTENSIONS;
+                               for (int i = 0; i < exts.Length; i++)
+                    AddIfExists(fileList, name + "." + exts[i]);
+                       }
+                       
+                       if (docStoreOffset != - 1)
+                       {
+                               // We are sharing doc stores (stored fields, term
+                               // vectors) with other segments
+                               System.Diagnostics.Debug.Assert(docStoreSegment != null);
+                               if (docStoreIsCompoundFile)
+                               {
+                    fileList.Add(docStoreSegment + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION);
+                               }
+                               else
+                               {
+                                       System.String[] exts = IndexFileNames.STORE_INDEX_EXTENSIONS;
+                                       for (int i = 0; i < exts.Length; i++)
+                        AddIfExists(fileList, docStoreSegment + "." + exts[i]);
+                               }
+                       }
+                       else if (!useCompoundFile)
+                       {
+                               // We are not sharing, and, these files were not
+                               // included in the compound file
+                               System.String[] exts = IndexFileNames.STORE_INDEX_EXTENSIONS;
+                               for (int i = 0; i < exts.Length; i++)
+                    AddIfExists(fileList, name + "." + exts[i]);
+                       }
+                       
+                       System.String delFileName = IndexFileNames.FileNameFromGeneration(name, "." + IndexFileNames.DELETES_EXTENSION, delGen);
+                       if (delFileName != null && (delGen >= YES || dir.FileExists(delFileName)))
+                       {
+                fileList.Add(delFileName);
+                       }
+                       
+                       // Careful logic for norms files    
+                       if (normGen != null)
+                       {
+                               for (int i = 0; i < normGen.Length; i++)
+                               {
+                                       long gen = normGen[i];
+                                       if (gen >= YES)
+                                       {
+                                               // Definitely a separate norm file, with generation:
+                        fileList.Add(IndexFileNames.FileNameFromGeneration(name, "." + IndexFileNames.SEPARATE_NORMS_EXTENSION + i, gen));
+                                       }
+                                       else if (NO == gen)
+                                       {
+                                               // No separate norms but maybe plain norms
+                                               // in the non compound file case:
+                                               if (!hasSingleNormFile && !useCompoundFile)
+                                               {
+                                                       System.String fileName = name + "." + IndexFileNames.PLAIN_NORMS_EXTENSION + i;
+                                                       if (dir.FileExists(fileName))
+                                                       {
+                                fileList.Add(fileName);
+                                                       }
+                                               }
+                                       }
+                                       else if (CHECK_DIR == gen)
+                                       {
+                                               // Pre-2.1: we have to check file existence
+                                               System.String fileName = null;
+                                               if (useCompoundFile)
+                                               {
+                                                       fileName = name + "." + IndexFileNames.SEPARATE_NORMS_EXTENSION + i;
+                                               }
+                                               else if (!hasSingleNormFile)
+                                               {
+                                                       fileName = name + "." + IndexFileNames.PLAIN_NORMS_EXTENSION + i;
+                                               }
+                                               if (fileName != null && dir.FileExists(fileName))
+                                               {
+                            fileList.Add(fileName);
+                                               }
+                                       }
+                               }
+                       }
+                       else if (preLockless || (!hasSingleNormFile && !useCompoundFile))
+                       {
+                               // Pre-2.1: we have to scan the dir to find all
+                               // matching _X.sN/_X.fN files for our segment:
+                               System.String prefix;
+                               if (useCompoundFile)
+                                       prefix = name + "." + IndexFileNames.SEPARATE_NORMS_EXTENSION;
+                               else
+                                       prefix = name + "." + IndexFileNames.PLAIN_NORMS_EXTENSION;
+                               int prefixLength = prefix.Length;
+                               System.String[] allFiles = dir.ListAll();
+                               IndexFileNameFilter filter = IndexFileNameFilter.GetFilter();
+                               for (int i = 0; i < allFiles.Length; i++)
+                               {
+                                       System.String fileName = allFiles[i];
+                                       if (filter.Accept(null, fileName) && fileName.Length > prefixLength && System.Char.IsDigit(fileName[prefixLength]) && fileName.StartsWith(prefix))
+                                       {
+                                               fileList.Add(fileName);
+                                       }
+                               }
+                       }
+            //System.Diagnostics.Debug.Assert();
+            files = fileList;
+                       return files;
+               }
+               
+               /* Called whenever any change is made that affects which
+               * files this segment has. */
+               private void  ClearFiles()
+               {
+                       files = null;
+                       sizeInBytes = - 1;
+               }
+               
+               /// <summary>Used for debugging </summary>
+               public System.String SegString(Directory dir)
+               {
+                       System.String cfs;
+                       try
+                       {
+                               if (GetUseCompoundFile())
+                                       cfs = "c";
+                               else
+                                       cfs = "C";
+                       }
+                       catch (System.IO.IOException)
+                       {
+                               cfs = "?";
+                       }
+                       
+                       System.String docStore;
+                       
+                       if (docStoreOffset != - 1)
+                               docStore = "->" + docStoreSegment;
+                       else
+                               docStore = "";
+                       
+                       return name + ":" + cfs + (this.dir == dir?"":"x") + docCount + docStore;
+               }
+               
+               /// <summary>We consider another SegmentInfo instance equal if it
+               /// has the same dir and same name. 
+               /// </summary>
+               public  override bool Equals(System.Object obj)
+               {
+                       SegmentInfo other;
+                       try
+                       {
+                               other = (SegmentInfo) obj;
+                       }
+                       catch (System.InvalidCastException)
+                       {
+                               return false;
+                       }
+                       return other.dir == dir && other.name.Equals(name);
+               }
+               
+               public override int GetHashCode()
+               {
+                       return dir.GetHashCode() + name.GetHashCode();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentInfos.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentInfos.cs
new file mode 100644 (file)
index 0000000..c93fb4d
--- /dev/null
@@ -0,0 +1,1116 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using ChecksumIndexInput = Mono.Lucene.Net.Store.ChecksumIndexInput;
+using ChecksumIndexOutput = Mono.Lucene.Net.Store.ChecksumIndexOutput;
+using Directory = Mono.Lucene.Net.Store.Directory;
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+using NoSuchDirectoryException = Mono.Lucene.Net.Store.NoSuchDirectoryException;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> A collection of segmentInfo objects with methods for operating on
+       /// those segments in relation to the file system.
+       /// 
+       /// <p/><b>NOTE:</b> This API is new and still experimental
+       /// (subject to change suddenly in the next release)<p/>
+       /// </summary>
+       [Serializable]
+       public sealed class SegmentInfos:System.Collections.ArrayList
+       {
+               private class AnonymousClassFindSegmentsFile:FindSegmentsFile
+               {
+                       private void  InitBlock(SegmentInfos enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private SegmentInfos enclosingInstance;
+                       public SegmentInfos Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal AnonymousClassFindSegmentsFile(SegmentInfos enclosingInstance, Mono.Lucene.Net.Store.Directory Param1):base(Param1)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       
+                       public /*protected internal*/ override System.Object DoBody(System.String segmentFileName)
+                       {
+                               Enclosing_Instance.Read(directory, segmentFileName);
+                               return null;
+                       }
+               }
+               /// <summary>The file format version, a negative number. </summary>
+               /* Works since counter, the old 1st entry, is always >= 0 */
+               public const int FORMAT = - 1;
+               
+               /// <summary>This format adds details used for lockless commits.  It differs
+               /// slightly from the previous format in that file names
+               /// are never re-used (write once).  Instead, each file is
+               /// written to the next generation.  For example,
+               /// segments_1, segments_2, etc.  This allows us to not use
+               /// a commit lock.  See <a
+               /// href="http://lucene.apache.org/java/docs/fileformats.html">file
+               /// formats</a> for details.
+               /// </summary>
+               public const int FORMAT_LOCKLESS = - 2;
+               
+               /// <summary>This format adds a "hasSingleNormFile" flag into each segment info.
+               /// See <a href="http://issues.apache.org/jira/browse/LUCENE-756">LUCENE-756</a>
+               /// for details.
+               /// </summary>
+               public const int FORMAT_SINGLE_NORM_FILE = - 3;
+               
+               /// <summary>This format allows multiple segments to share a single
+               /// vectors and stored fields file. 
+               /// </summary>
+               public const int FORMAT_SHARED_DOC_STORE = - 4;
+               
+               /// <summary>This format adds a checksum at the end of the file to
+               /// ensure all bytes were successfully written. 
+               /// </summary>
+               public const int FORMAT_CHECKSUM = - 5;
+               
+               /// <summary>This format adds the deletion count for each segment.
+               /// This way IndexWriter can efficiently report numDocs(). 
+               /// </summary>
+               public const int FORMAT_DEL_COUNT = - 6;
+               
+               /// <summary>This format adds the boolean hasProx to record if any
+               /// fields in the segment store prox information (ie, have
+               /// omitTermFreqAndPositions==false) 
+               /// </summary>
+               public const int FORMAT_HAS_PROX = - 7;
+               
+               /// <summary>This format adds optional commit userData (String) storage. </summary>
+               public const int FORMAT_USER_DATA = - 8;
+               
+               /// <summary>This format adds optional per-segment String
+               /// diagnostics storage, and switches userData to Map
+               /// </summary>
+               public const int FORMAT_DIAGNOSTICS = - 9;
+               
+               /* This must always point to the most recent file format. */
+               internal static readonly int CURRENT_FORMAT = FORMAT_DIAGNOSTICS;
+               
+               public int counter = 0; // used to name new segments
+               /// <summary> Counts how often the index has been changed by adding or deleting docs.
+               /// Starting from the current time in milliseconds forces the version numbers to be unique.
+               /// </summary>
+               private long version = (DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond);
+               
+               private long generation = 0; // generation of the "segments_N" for the next commit
+               private long lastGeneration = 0; // generation of the "segments_N" file we last successfully read
+               // or wrote; this is normally the same as generation except if
+               // there was an IOException that had interrupted a commit
+
+        private System.Collections.Generic.IDictionary<string, string> userData = new System.Collections.Generic.Dictionary<string, string>(); // Opaque Map<String, String> that user can specify during IndexWriter.commit
+               
+               /// <summary> If non-null, information about loading segments_N files</summary>
+               /// <seealso cref="setInfoStream">
+               /// </seealso>
+               private static System.IO.StreamWriter infoStream;
+               
+               public SegmentInfo Info(int i)
+               {
+                       return (SegmentInfo) this[i];
+               }
+               
+               /// <summary> Get the generation (N) of the current segments_N file
+               /// from a list of files.
+               /// 
+               /// </summary>
+               /// <param name="files">-- array of file names to check
+               /// </param>
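+               /// <example>
+               /// A hedged sketch; generations in segments_N file names appear to
+               /// be written in radix 36 by the upstream Lucene code, so
+               /// "segments_a" would be generation 10:
+               /// <code>
+               /// long gen = SegmentInfos.GetCurrentSegmentGeneration(
+               ///     new System.String[] { "segments_1", "segments_a", "segments.gen" });
+               /// // gen == 10; "segments.gen" itself is skipped by the scan
+               /// </code>
+               /// </example>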
+               public static long GetCurrentSegmentGeneration(System.String[] files)
+               {
+                       if (files == null)
+                       {
+                               return - 1;
+                       }
+                       long max = - 1;
+                       for (int i = 0; i < files.Length; i++)
+                       {
+                               System.String file = files[i];
+                               if (file.StartsWith(IndexFileNames.SEGMENTS) && !file.Equals(IndexFileNames.SEGMENTS_GEN))
+                               {
+                                       long gen = GenerationFromSegmentsFileName(file);
+                                       if (gen > max)
+                                       {
+                                               max = gen;
+                                       }
+                               }
+                       }
+                       return max;
+               }
+               
+               /// <summary> Get the generation (N) of the current segments_N file
+               /// in the directory.
+               /// 
+               /// </summary>
+               /// <param name="directory">-- directory to search for the latest segments_N file
+               /// </param>
+               public static long GetCurrentSegmentGeneration(Directory directory)
+               {
+                       try
+                       {
+                               return GetCurrentSegmentGeneration(directory.ListAll());
+                       }
+                       catch (NoSuchDirectoryException)
+                       {
+                               return - 1;
+                       }
+               }
+               
+               /// <summary> Get the filename of the current segments_N file
+               /// from a list of files.
+               /// 
+               /// </summary>
+               /// <param name="files">-- array of file names to check
+               /// </param>
+               
+               public static System.String GetCurrentSegmentFileName(System.String[] files)
+               {
+                       return IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", GetCurrentSegmentGeneration(files));
+               }
+               
+               /// <summary> Get the filename of the current segments_N file
+               /// in the directory.
+               /// 
+               /// </summary>
+               /// <param name="directory">-- directory to search for the latest segments_N file
+               /// </param>
+               public static System.String GetCurrentSegmentFileName(Directory directory)
+               {
+                       return IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", GetCurrentSegmentGeneration(directory));
+               }
+               
+               /// <summary> Get the segments_N filename in use by this segment infos.</summary>
+               public System.String GetCurrentSegmentFileName()
+               {
+                       return IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", lastGeneration);
+               }
+               
+               /// <summary> Parse the generation off the segments file name and
+               /// return it.
+               /// </summary>
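+               /// <example>
+               /// A short sketch of the three cases handled below:
+               /// <code>
+               /// SegmentInfos.GenerationFromSegmentsFileName("segments");   // 0 (pre-lockless)
+               /// SegmentInfos.GenerationFromSegmentsFileName("segments_2"); // 2
+               /// SegmentInfos.GenerationFromSegmentsFileName("_1.cfs");     // ArgumentException
+               /// </code>
+               /// </example>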
+               public static long GenerationFromSegmentsFileName(System.String fileName)
+               {
+                       if (fileName.Equals(IndexFileNames.SEGMENTS))
+                       {
+                               return 0;
+                       }
+                       else if (fileName.StartsWith(IndexFileNames.SEGMENTS))
+                       {
+                               return SupportClass.Number.ToInt64(fileName.Substring(1 + IndexFileNames.SEGMENTS.Length));
+                       }
+                       else
+                       {
+                               throw new System.ArgumentException("fileName \"" + fileName + "\" is not a segments file");
+                       }
+               }
+               
+               
+               /// <summary> Get the next segments_N filename that will be written.</summary>
+               public System.String GetNextSegmentFileName()
+               {
+                       long nextGeneration;
+                       
+                       if (generation == - 1)
+                       {
+                               nextGeneration = 1;
+                       }
+                       else
+                       {
+                               nextGeneration = generation + 1;
+                       }
+                       return IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", nextGeneration);
+               }
+               
+               /// <summary> Read a particular segmentFileName.  Note that this may
+               /// throw an IOException if a commit is in process.
+               /// 
+               /// </summary>
+               /// <param name="directory">-- directory containing the segments file
+               /// </param>
+               /// <param name="segmentFileName">-- segment file to load
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
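+               /// <example>
+               /// A minimal sketch, assuming <c>dir</c> is a Directory that already
+               /// holds an index:
+               /// <code>
+               /// SegmentInfos infos = new SegmentInfos();
+               /// infos.Read(dir, SegmentInfos.GetCurrentSegmentFileName(dir));
+               /// int segments = infos.Count; // one SegmentInfo per segment
+               /// </code>
+               /// </example>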
+               public void  Read(Directory directory, System.String segmentFileName)
+               {
+                       bool success = false;
+                       
+                       // Clear any previous segments:
+                       Clear();
+                       
+                       ChecksumIndexInput input = new ChecksumIndexInput(directory.OpenInput(segmentFileName));
+                       
+                       generation = GenerationFromSegmentsFileName(segmentFileName);
+                       
+                       lastGeneration = generation;
+                       
+                       try
+                       {
+                               int format = input.ReadInt();
+                               if (format < 0)
+                               {
+                                       // file contains explicit format info
+                                       // check that it is a format we can understand
+                                       if (format < CURRENT_FORMAT)
+                                               throw new CorruptIndexException("Unknown format version: " + format);
+                                       version = input.ReadLong(); // read version
+                                       counter = input.ReadInt(); // read counter
+                               }
+                               else
+                               {
+                                       // file is in old format without explicit format info
+                                       counter = format;
+                               }
+                               
+                               for (int i = input.ReadInt(); i > 0; i--)
+                               {
+                                       // read segmentInfos
+                                       Add(new SegmentInfo(directory, format, input));
+                               }
+                               
+                               if (format >= 0)
+                               {
+                                       // in old format the version number may be at the end of the file
+                                       if (input.GetFilePointer() >= input.Length())
+                                               version = (DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond); // old file format without version number
+                                       else
+                                               version = input.ReadLong(); // read version
+                               }
+                               
+                               if (format <= FORMAT_USER_DATA)
+                               {
+                                       if (format <= FORMAT_DIAGNOSTICS)
+                                       {
+                                               userData = input.ReadStringStringMap();
+                                       }
+                                       else if (0 != input.ReadByte())
+                                       {
+                        userData = new System.Collections.Generic.Dictionary<string,string>();
+                                               userData.Add("userData", input.ReadString());
+                                       }
+                                       else
+                                       {
+                        userData = new System.Collections.Generic.Dictionary<string, string>();
+                                       }
+                               }
+                               else
+                               {
+                    userData = new System.Collections.Generic.Dictionary<string, string>();
+                               }
+                               
+                               if (format <= FORMAT_CHECKSUM)
+                               {
+                                       long checksumNow = input.GetChecksum();
+                                       long checksumThen = input.ReadLong();
+                                       if (checksumNow != checksumThen)
+                                               throw new CorruptIndexException("checksum mismatch in segments file");
+                               }
+                               success = true;
+                       }
+                       finally
+                       {
+                               input.Close();
+                               if (!success)
+                               {
+                                       // Clear any segment infos we had loaded so we
+                                       // have a clean slate on retry:
+                                       Clear();
+                               }
+                       }
+               }
+               
+               /// <summary> This version of read uses the retry logic (for lock-less
+               /// commits) to find the right segments file to load.
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public void  Read(Directory directory)
+               {
+                       
+                       generation = lastGeneration = - 1;
+                       
+                       new AnonymousClassFindSegmentsFile(this, directory).Run();
+               }
+               
+               // Only non-null after prepareCommit has been called and
+               // before finishCommit is called
+               internal ChecksumIndexOutput pendingSegnOutput;
+               
+               private void  Write(Directory directory)
+               {
+                       
+                       System.String segmentFileName = GetNextSegmentFileName();
+                       
+                       // Always advance the generation on write:
+                       if (generation == - 1)
+                       {
+                               generation = 1;
+                       }
+                       else
+                       {
+                               generation++;
+                       }
+                       
+                       ChecksumIndexOutput segnOutput = new ChecksumIndexOutput(directory.CreateOutput(segmentFileName));
+                       
+                       bool success = false;
+                       
+                       try
+                       {
+                               segnOutput.WriteInt(CURRENT_FORMAT); // write FORMAT
+                               segnOutput.WriteLong(++version); // every write changes
+                               // the index
+                               segnOutput.WriteInt(counter); // write counter
+                               segnOutput.WriteInt(Count); // write infos
+                               for (int i = 0; i < Count; i++)
+                               {
+                                       Info(i).Write(segnOutput);
+                               }
+                               segnOutput.WriteStringStringMap(userData);
+                               segnOutput.PrepareCommit();
+                               success = true;
+                               pendingSegnOutput = segnOutput;
+                       }
+                       finally
+                       {
+                               if (!success)
+                               {
+                                       // We hit an exception above; try to close the file
+                                       // but suppress any exception:
+                                       try
+                                       {
+                                               segnOutput.Close();
+                                       }
+                                       catch (System.Exception)
+                                       {
+                                               // Suppress so we keep throwing the original exception
+                                       }
+                                       try
+                                       {
+                                               // Try not to leave a truncated segments_N file in
+                                               // the index:
+                                               directory.DeleteFile(segmentFileName);
+                                       }
+                                       catch (System.Exception)
+                                       {
+                                               // Suppress so we keep throwing the original exception
+                                       }
+                               }
+                       }
+               }
+               
+               /// <summary> Returns a copy of this instance, also copying each
+               /// SegmentInfo.
+               /// </summary>
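+               /// <example>
+               /// A hedged sketch ("infos" assumed populated): the copy is deep with
+               /// respect to the contained SegmentInfo objects, so mutating the
+               /// clone leaves the original untouched:
+               /// <code>
+               /// SegmentInfos copy = (SegmentInfos) infos.Clone();
+               /// copy.RemoveAt(0); // does not affect infos
+               /// </code>
+               /// </example>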
+               
+               public override System.Object Clone()
+               {
+            SegmentInfos sis = new SegmentInfos();
+            for (int i = 0; i < this.Count; i++)
+            {
+                sis.Add(((SegmentInfo) this[i]).Clone());
+            }
+            sis.counter = this.counter;
+            sis.generation = this.generation;
+            sis.lastGeneration = this.lastGeneration;
+            // sis.pendingSegnOutput = this.pendingSegnOutput; // {{Aroush-2.9}} needed?
+            sis.userData = new System.Collections.Generic.Dictionary<string, string>(userData);
+            sis.version = this.version;
+            return sis;
+               }
+               
+               /// <summary> Version number when this SegmentInfos was generated.</summary>
+               public long GetVersion()
+               {
+                       return version;
+               }
+               public long GetGeneration()
+               {
+                       return generation;
+               }
+               public long GetLastGeneration()
+               {
+                       return lastGeneration;
+               }
+               
+               /// <summary> Current version number from segments file.</summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
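+               /// <example>
+               /// A hedged sketch of cheap change detection, assuming <c>dir</c>
+               /// holds an index:
+               /// <code>
+               /// long before = SegmentInfos.ReadCurrentVersion(dir);
+               /// // ... some writer may commit in the meantime ...
+               /// long after = SegmentInfos.ReadCurrentVersion(dir);
+               /// bool changed = after != before; // version grows with every write
+               /// </code>
+               /// </example>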
+               public static long ReadCurrentVersion(Directory directory)
+               {
+            // Fully read the segments file: this ensures that it's
+            // completely written so that if
+            // IndexWriter.prepareCommit has been called (but not
+            // yet commit), then the reader will still see itself as
+            // current:
+            SegmentInfos sis = new SegmentInfos();
+            sis.Read(directory);
+            return sis.version;
+                       //return (long) ((System.Int64) new AnonymousClassFindSegmentsFile1(directory).Run());
+            //DIGY: AnonymousClassFindSegmentsFile1 can safely be deleted
+               }
+               
+               /// <summary> Returns userData from latest segments file</summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+        public static System.Collections.Generic.IDictionary<string, string> ReadCurrentUserData(Directory directory)
+               {
+                       SegmentInfos sis = new SegmentInfos();
+                       sis.Read(directory);
+                       return sis.GetUserData();
+               }
+               
+               /// <summary>If non-null, information about retries when loading
+               /// the segments file will be printed to this.
+               /// </summary>
+               public static void  SetInfoStream(System.IO.StreamWriter infoStream)
+               {
+                       SegmentInfos.infoStream = infoStream;
+               }
+               
+               /* Advanced configuration of retry logic in loading
+               segments_N file */
+               private static int defaultGenFileRetryCount = 10;
+               private static int defaultGenFileRetryPauseMsec = 50;
+               private static int defaultGenLookaheadCount = 10;
+               
+               /// <summary> Advanced: set how many times to try loading the
+               /// segments.gen file contents to determine current segment
+               /// generation.  This file is only referenced when the
+               /// primary method (listing the directory) fails.
+               /// </summary>
+               public static void  SetDefaultGenFileRetryCount(int count)
+               {
+                       defaultGenFileRetryCount = count;
+               }
+               
+               /// <seealso cref="setDefaultGenFileRetryCount">
+               /// </seealso>
+               public static int GetDefaultGenFileRetryCount()
+               {
+                       return defaultGenFileRetryCount;
+               }
+               
+               /// <summary> Advanced: set how many milliseconds to pause in between
+               /// attempts to load the segments.gen file.
+               /// </summary>
+               public static void  SetDefaultGenFileRetryPauseMsec(int msec)
+               {
+                       defaultGenFileRetryPauseMsec = msec;
+               }
+               
+               /// <seealso cref="setDefaultGenFileRetryPauseMsec">
+               /// </seealso>
+               public static int GetDefaultGenFileRetryPauseMsec()
+               {
+                       return defaultGenFileRetryPauseMsec;
+               }
+               
+               /// <summary> Advanced: set how many times to try incrementing the
+               /// gen when loading the segments file.  This only runs if
+               /// the primary (listing directory) and secondary (opening
+               /// segments.gen file) methods fail to find the segments
+               /// file.
+               /// </summary>
+               public static void  SetDefaultGenLookaheadCount(int count)
+               {
+                       defaultGenLookaheadCount = count;
+               }
+               /// <seealso cref="setDefaultGenLookaheadCount">
+               /// </seealso>
+               public static int GetDefaultGenLookahedCount()
+               {
+                       return defaultGenLookaheadCount;
+               }
+               
+               /// <seealso cref="setInfoStream">
+               /// </seealso>
+               public static System.IO.StreamWriter GetInfoStream()
+               {
+                       return infoStream;
+               }
+               
+               private static void  Message(System.String message)
+               {
+                       if (infoStream != null)
+                       {
+                               infoStream.WriteLine("SIS [" + SupportClass.ThreadClass.Current().Name + "]: " + message);
+                       }
+               }
+               
+               /// <summary> Utility class for executing code that needs to do
+               /// something with the current segments file.  This is
+               /// necessary with lock-less commits because from the time
+               /// you locate the current segments file name, until you
+               /// actually open it, read its contents, or check modified
+               /// time, etc., it could have been deleted due to a writer
+               /// commit finishing.
+               /// </summary>
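+               /// <example>
+               /// A hedged sketch of a subclass (illustrative only); Run() retries
+               /// DoBody until it succeeds on a consistent segments_N:
+               /// <code>
+               /// class SegmentsNameGrabber : SegmentInfos.FindSegmentsFile
+               /// {
+               ///     public SegmentsNameGrabber(Directory d) : base(d) {}
+               ///     public override System.Object DoBody(System.String segmentFileName)
+               ///     {
+               ///         // A real implementation would open and read the file here;
+               ///         // throwing an IOException triggers the retry logic.
+               ///         return segmentFileName;
+               ///     }
+               /// }
+               /// // System.Object name = new SegmentsNameGrabber(dir).Run();
+               /// </code>
+               /// </example>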
+               public abstract class FindSegmentsFile
+               {
+                       
+                       internal Directory directory;
+                       
+                       public FindSegmentsFile(Directory directory)
+                       {
+                               this.directory = directory;
+                       }
+                       
+                       public System.Object Run()
+                       {
+                               return Run(null);
+                       }
+                       
+                       public System.Object Run(IndexCommit commit)
+                       {
+                               if (commit != null)
+                               {
+                                       if (directory != commit.GetDirectory())
+                                               throw new System.IO.IOException("the specified commit does not match the specified Directory");
+                                       return DoBody(commit.GetSegmentsFileName());
+                               }
+                               
+                               System.String segmentFileName = null;
+                               long lastGen = - 1;
+                               long gen = 0;
+                               int genLookaheadCount = 0;
+                               System.IO.IOException exc = null;
+                               bool retry = false;
+                               
+                               int method = 0;
+                               
+                               // Loop until we succeed in calling doBody() without
+                               // hitting an IOException.  An IOException most likely
+                               // means a commit was in process and has finished, in
+                               // the time it took us to load the now-old infos files
+                               // (and segments files).  It's also possible it's a
+                               // true error (corrupt index).  To distinguish these,
+                               // on each retry we must see "forward progress" on
+                               // which generation we are trying to load.  If we
+                               // don't, then the original error is real and we throw
+                               // it.
+                               
+                               // We have three methods for determining the current
+                               // generation.  We try the first two in parallel, and
+                               // fall back to the third when necessary.
+                               
+                               while (true)
+                               {
+                                       
+                                       if (0 == method)
+                                       {
+                                               
+                                               // Method 1: list the directory and use the highest
+                                               // segments_N file.  This method works well as long
+                                               // as there is no stale caching on the directory
+                                               // contents (NOTE: NFS clients often have such stale
+                                               // caching):
+                                               System.String[] files = null;
+                                               
+                                               long genA = - 1;
+                                               
+                                               files = directory.ListAll();
+                                               
+                                               if (files != null)
+                                                       genA = Mono.Lucene.Net.Index.SegmentInfos.GetCurrentSegmentGeneration(files);
+                                               
+                                               Mono.Lucene.Net.Index.SegmentInfos.Message("directory listing genA=" + genA);
+                                               
+                                               // Method 2: open segments.gen and read its
+                                               // contents.  Then we take the larger of the two
+                                               // gens.  This way, if either approach is hitting
+                                               // a stale cache (NFS) we have a better chance of
+                                               // getting the right generation.
+                                               long genB = - 1;
+                                               for (int i = 0; i < Mono.Lucene.Net.Index.SegmentInfos.defaultGenFileRetryCount; i++)
+                                               {
+                                                       IndexInput genInput = null;
+                                                       try
+                                                       {
+                                                               genInput = directory.OpenInput(IndexFileNames.SEGMENTS_GEN);
+                                                       }
+                                                       catch (System.IO.FileNotFoundException e)
+                                                       {
+                                                               Mono.Lucene.Net.Index.SegmentInfos.Message("segments.gen open: FileNotFoundException " + e);
+                                                               break;
+                                                       }
+                                                       catch (System.IO.IOException e)
+                                                       {
+                                                               Mono.Lucene.Net.Index.SegmentInfos.Message("segments.gen open: IOException " + e);
+                                                       }
+                                                       
+                                                       if (genInput != null)
+                                                       {
+                                                               try
+                                                               {
+                                                                       int version = genInput.ReadInt();
+                                                                       if (version == Mono.Lucene.Net.Index.SegmentInfos.FORMAT_LOCKLESS)
+                                                                       {
+                                                                               long gen0 = genInput.ReadLong();
+                                                                               long gen1 = genInput.ReadLong();
+                                                                               Mono.Lucene.Net.Index.SegmentInfos.Message("fallback check: " + gen0 + "; " + gen1);
+                                                                               if (gen0 == gen1)
+                                                                               {
+                                                                                       // The file is consistent.
+                                                                                       genB = gen0;
+                                                                                       break;
+                                                                               }
+                                                                       }
+                                                               }
+                                                               catch (System.IO.IOException)
+                                                               {
+                                                                       // will retry
+                                                               }
+                                                               finally
+                                                               {
+                                                                       genInput.Close();
+                                                               }
+                                                       }
+                                                       try
+                                                       {
+                                                               System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * Mono.Lucene.Net.Index.SegmentInfos.defaultGenFileRetryPauseMsec));
+                                                       }
+                                                       catch (System.Threading.ThreadInterruptedException ie)
+                                                       {
+                                                               // In 3.0 we will change this to throw
+                                                               // InterruptedException instead
+                                                               SupportClass.ThreadClass.Current().Interrupt();
+                                                               throw new System.SystemException(ie.Message, ie);
+                                                       }
+                                               }
+                                               
+                                               Mono.Lucene.Net.Index.SegmentInfos.Message(IndexFileNames.SEGMENTS_GEN + " check: genB=" + genB);
+                                               
+                                               // Pick the larger of the two gen's:
+                                               if (genA > genB)
+                                                       gen = genA;
+                                               else
+                                                       gen = genB;
+                                               
+                                               if (gen == - 1)
+                                               {
+                                                       // Neither approach found a generation
+                                                       System.String s;
+                                                       if (files != null)
+                                                       {
+                                                               s = "";
+                                                               for (int i = 0; i < files.Length; i++)
+                                                                       s += (" " + files[i]);
+                                                       }
+                                                       else
+                                                               s = " null";
+                                                       throw new System.IO.FileNotFoundException("no segments* file found in " + directory + ": files:" + s);
+                                               }
+                                       }
+                                       
+                                       // Third method (fallback if first & second methods
+                                       // are not reliable): since both directory cache and
+                                       // file contents cache seem to be stale, just
+                                       // advance the generation.
+                                       if (1 == method || (0 == method && lastGen == gen && retry))
+                                       {
+                                               
+                                               method = 1;
+                                               
+                                               if (genLookaheadCount < Mono.Lucene.Net.Index.SegmentInfos.defaultGenLookaheadCount)
+                                               {
+                                                       gen++;
+                                                       genLookaheadCount++;
+                                                       Mono.Lucene.Net.Index.SegmentInfos.Message("look ahead increment gen to " + gen);
+                                               }
+                                       }
+                                       
+                                       if (lastGen == gen)
+                                       {
+                                               
+                                               // This means we're about to try the same
+                                               // segments_N we last tried.  This is allowed,
+                                               // exactly once, because the writer could have been in
+                                               // the process of writing segments_N last time.
+                                               
+                                               if (retry)
+                                               {
+                                                       // OK, we've tried the same segments_N file
+                                                       // twice in a row, so this must be a real
+                                                       // error.  We throw the original exception we
+                                                       // got.
+                                                       throw exc;
+                                               }
+                                               else
+                                               {
+                                                       retry = true;
+                                               }
+                                       }
+                                       else if (0 == method)
+                                       {
+                                               // Segment file has advanced since our last loop, so
+                                               // reset retry:
+                                               retry = false;
+                                       }
+                                       
+                                       lastGen = gen;
+                                       
+                                       segmentFileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen);
+                                       
+                                       try
+                                       {
+                                               System.Object v = DoBody(segmentFileName);
+                                               Mono.Lucene.Net.Index.SegmentInfos.Message("success on " + segmentFileName);
+                                               
+                                               return v;
+                                       }
+                                       catch (System.IO.IOException err)
+                                       {
+                                               
+                                               // Save the original root cause:
+                                               if (exc == null)
+                                               {
+                                                       exc = err;
+                                               }
+                                               
+                                               Mono.Lucene.Net.Index.SegmentInfos.Message("primary Exception on '" + segmentFileName + "': " + err + "'; will retry: retry=" + retry + "; gen = " + gen);
+                                               
+                                               if (!retry && gen > 1)
+                                               {
+                                                       
+                                                       // This is our first time trying this segments
+                                                       // file (because retry is false), and, there is
+                                                       // possibly a segments_(N-1) (because gen > 1).
+                                                       // So, check if the segments_(N-1) exists and
+                                                       // try it if so:
+                                                       System.String prevSegmentFileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", gen - 1);
+                                                       
+                                                       bool prevExists;
+                                                       prevExists = directory.FileExists(prevSegmentFileName);
+                                                       
+                                                       if (prevExists)
+                                                       {
+                                                               Mono.Lucene.Net.Index.SegmentInfos.Message("fallback to prior segment file '" + prevSegmentFileName + "'");
+                                                               try
+                                                               {
+                                                                       System.Object v = DoBody(prevSegmentFileName);
+                                                                       if (exc != null)
+                                                                       {
+                                                                               Mono.Lucene.Net.Index.SegmentInfos.Message("success on fallback " + prevSegmentFileName);
+                                                                       }
+                                                                       return v;
+                                                               }
+                                                               catch (System.IO.IOException err2)
+                                                               {
+                                                                       Mono.Lucene.Net.Index.SegmentInfos.Message("secondary Exception on '" + prevSegmentFileName + "': " + err2 + "'; will retry");
+                                                               }
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       /// <summary> Subclasses must implement this.  The assumption is that an
+                       /// IOException will be thrown if something goes wrong
+                       /// during processing that could have been caused by
+                       /// a writer committing.
+                       /// </summary>
+                       public /*internal*/ abstract System.Object DoBody(System.String segmentFileName);
+               }
+               
+               /// <summary> Returns a new SegmentInfos containing the SegmentInfo
+               /// instances in the specified range first (inclusive) to
+               /// last (exclusive), so total number of segments returned
+               /// is last-first.
+               /// </summary>
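+               /// <example>
+               /// A small sketch ("infos" assumed to hold four segments):
+               /// <code>
+               /// SegmentInfos middle = infos.Range(1, 3); // segments 1 and 2; middle.Count == 2
+               /// </code>
+               /// </example>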
+               public SegmentInfos Range(int first, int last)
+               {
+                       SegmentInfos infos = new SegmentInfos();
+                       infos.AddRange((System.Collections.IList) ((System.Collections.ArrayList) this).GetRange(first, last - first));
+                       return infos;
+               }
+               
+               // Carry over generation numbers from another SegmentInfos
+               internal void  UpdateGeneration(SegmentInfos other)
+               {
+                       lastGeneration = other.lastGeneration;
+                       generation = other.generation;
+                       version = other.version;
+               }
+               
+               internal void  RollbackCommit(Directory dir)
+               {
+                       if (pendingSegnOutput != null)
+                       {
+                               try
+                               {
+                                       pendingSegnOutput.Close();
+                               }
+                               catch (System.Exception)
+                               {
+                                       // Suppress so we keep throwing the original exception
+                                       // in our caller
+                               }
+                               
+                               // Must carefully compute fileName from "generation"
+                               // since lastGeneration isn't incremented:
+                               try
+                               {
+                                       System.String segmentFileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation);
+                                       dir.DeleteFile(segmentFileName);
+                               }
+                               catch (System.Exception)
+                               {
+                                       // Suppress so we keep throwing the original exception
+                                       // in our caller
+                               }
+                               pendingSegnOutput = null;
+                       }
+               }
+               
+               /// <summary>Call this to start a commit.  This writes the new
+               /// segments file, but writes an invalid checksum at the
+               /// end, so that it is not visible to readers.  Once this
+               /// is called you must call {@link #finishCommit} to complete
+               /// the commit or {@link #rollbackCommit} to abort it. 
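+               /// A minimal sketch of the expected call sequence (hypothetical
+               /// <c>infos</c> and <c>dir</c> variables; error handling elided):
+               /// <code>
+               /// infos.PrepareCommit(dir);      // segments_N written, checksum still invalid
+               /// // ... any remaining two-phase-commit work ...
+               /// infos.FinishCommit(dir);       // completes and syncs the commit
+               /// // or, to abort: infos.RollbackCommit(dir);
+               /// </code>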
+               /// </summary>
+               internal void  PrepareCommit(Directory dir)
+               {
+                       if (pendingSegnOutput != null)
+                               throw new System.SystemException("prepareCommit was already called");
+                       Write(dir);
+               }
+               
+               /// <summary>Returns all file names referenced by SegmentInfo
+               /// instances matching the provided Directory (i.e. files
+               /// associated with any "external" segments are skipped).
+               /// The returned collection is recomputed on each
+               /// invocation.  
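+               /// For example, <c>Files(dir, true)</c> returns the current
+               /// segments_N file name plus the files of every segment whose
+               /// directory is <c>dir</c> (hypothetical <c>dir</c> variable).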
+               /// </summary>
+        public System.Collections.Generic.ICollection<string> Files(Directory dir, bool includeSegmentsFile)
+               {
+            System.Collections.Generic.Dictionary<string, string> files = new System.Collections.Generic.Dictionary<string, string>();
+                       if (includeSegmentsFile)
+                       {
+                string tmp = GetCurrentSegmentFileName();
+                files.Add(tmp, tmp);
+                       }
+                       int size = Count;
+                       for (int i = 0; i < size; i++)
+                       {
+                               SegmentInfo info = Info(i);
+                               if (info.dir == dir)
+                               {
+                                       SupportClass.CollectionsHelper.AddAllIfNotContains(files, info.Files());
+                               }
+                       }
+                       return files.Keys;
+               }
+               
+               internal void  FinishCommit(Directory dir)
+               {
+                       if (pendingSegnOutput == null)
+                               throw new System.SystemException("prepareCommit was not called");
+                       bool success = false;
+                       try
+                       {
+                               pendingSegnOutput.FinishCommit();
+                               pendingSegnOutput.Close();
+                               pendingSegnOutput = null;
+                               success = true;
+                       }
+                       finally
+                       {
+                               if (!success)
+                                       RollbackCommit(dir);
+                       }
+                       
+                       // NOTE: if we crash here, we have left a segments_N
+                       // file in the directory in a possibly corrupt state (if
+                       // some bytes made it to stable storage and others
+                       // didn't).  But, the segments_N file includes checksum
+                       // at the end, which should catch this case.  So when a
+                       // reader tries to read it, it will throw a
+                       // CorruptIndexException, which should cause the retry
+                       // logic in SegmentInfos to kick in and load the last
+                       // good (previous) segments_N-1 file.
+                       
+                       System.String fileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation);
+                       success = false;
+                       try
+                       {
+                               dir.Sync(fileName);
+                               success = true;
+                       }
+                       finally
+                       {
+                               if (!success)
+                               {
+                                       try
+                                       {
+                                               dir.DeleteFile(fileName);
+                                       }
+                                       catch (System.Exception)
+                                       {
+                                               // Suppress so we keep throwing the original exception
+                                       }
+                               }
+                       }
+                       
+                       lastGeneration = generation;
+                       
+                       try
+                       {
+                               IndexOutput genOutput = dir.CreateOutput(IndexFileNames.SEGMENTS_GEN);
+                               try
+                               {
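+                                       // The generation is written twice; readers treat the file as
+                                       // valid only when both copies match, guarding against torn writes.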
+                                       genOutput.WriteInt(FORMAT_LOCKLESS);
+                                       genOutput.WriteLong(generation);
+                                       genOutput.WriteLong(generation);
+                               }
+                               finally
+                               {
+                                       genOutput.Close();
+                               }
+                       }
+                       catch (System.Exception)
+                       {
+                               // It's OK if we fail to write this file since it's
+                               // used only as one of the retry fallbacks.
+                       }
+               }
+               
+               /// <summary>Writes &amp; syncs to the Directory dir, taking care to
+               /// remove the segments file on exception 
+               /// </summary>
+               public /*internal*/ void  Commit(Directory dir)
+               {
+                       PrepareCommit(dir);
+                       FinishCommit(dir);
+               }
+               
+               public System.String SegString(Directory directory)
+               {
+                       lock (this)
+                       {
+                               System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                               int count = Count;
+                               for (int i = 0; i < count; i++)
+                               {
+                                       if (i > 0)
+                                       {
+                                               buffer.Append(' ');
+                                       }
+                                       SegmentInfo info = Info(i);
+                                       buffer.Append(info.SegString(directory));
+                                       if (info.dir != directory)
+                                               buffer.Append("**");
+                               }
+                               return buffer.ToString();
+                       }
+               }
+               
+               public System.Collections.Generic.IDictionary<string,string> GetUserData()
+               {
+                       return userData;
+               }
+
+        internal void SetUserData(System.Collections.Generic.IDictionary<string, string> data)
+               {
+                       if (data == null)
+                       {
+                               userData = new System.Collections.Generic.Dictionary<string,string>();
+                       }
+                       else
+                       {
+                               userData = data;
+                       }
+               }
+               
+               /// <summary>Replaces all segments in this instance, but keeps the
+               /// generation, version and counter so that future commits
+               /// remain write-once.
+               /// </summary>
+               internal void  Replace(SegmentInfos other)
+               {
+                       Clear();
+                       AddRange(other);
+                       lastGeneration = other.lastGeneration;
+               }
+               
+               // Used only for testing
+               public bool HasExternalSegments(Directory dir)
+               {
+                       int numSegments = Count;
+                       for (int i = 0; i < numSegments; i++)
+                               if (Info(i).dir != dir)
+                                       return true;
+                       return false;
+        }
+
+        #region Lucene.NET (Equals & GetHashCode)
+        /// <summary>
+        /// Simple brute force implementation.
+        /// If the sizes are equal, compare items one by one.
+        /// </summary>
+        /// <param name="obj">SegmentInfos object to check equality for</param>
+        /// <returns>true if lists are equal, false otherwise</returns>
+        public override bool Equals(object obj)
+        {
+            if (obj == null) return false;
+
+            SegmentInfos objToCompare = obj as SegmentInfos;
+            if (objToCompare == null) return false;
+
+            if (this.Count != objToCompare.Count) return false;
+
+            for (int idx = 0; idx < this.Count; idx++)
+            {
+                if (!this[idx].Equals(objToCompare[idx])) return false;
+            }
+
+            return true;
+        }
+
+        /// <summary>
+        /// Calculate hash code of SegmentInfos
+        /// </summary>
+        /// <returns>hash code, as in the Java version of ArrayList</returns>
+        public override int GetHashCode()
+        {
+            int h = 1;
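+            // Same recurrence as java.util.AbstractList.hashCode():
+            // h = 31 * h + elementHash, for each element in order.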
+            for (int i = 0; i < this.Count; i++)
+            {
+                SegmentInfo si = (this[i] as SegmentInfo);
+                h = 31 * h + (si == null ? 0 : si.GetHashCode());
+            }
+
+            return h;
+        }
+        #endregion
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentMergeInfo.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentMergeInfo.cs
new file mode 100644 (file)
index 0000000..6b0d75a
--- /dev/null
@@ -0,0 +1,101 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class SegmentMergeInfo
+       {
+               internal Term term;
+               internal int base_Renamed;
+               internal int ord; // the position of the segment in a MultiReader
+               internal TermEnum termEnum;
+               internal IndexReader reader;
+               internal int delCount;
+               private TermPositions postings; // use getPositions()
+               private int[] docMap; // use getDocMap()
+               
+               internal SegmentMergeInfo(int b, TermEnum te, IndexReader r)
+               {
+                       base_Renamed = b;
+                       reader = r;
+                       termEnum = te;
+                       term = te.Term();
+               }
+               
+               // maps around deleted docs
+               internal int[] GetDocMap()
+               {
+                       if (docMap == null)
+                       {
+                               delCount = 0;
+                               // build array which maps document numbers around deletions 
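+                               // e.g. with maxDoc == 5 and only doc 2 deleted, the loop below
+                               // yields docMap == { 0, 1, -1, 2, 3 } and delCount == 1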
+                               if (reader.HasDeletions())
+                               {
+                                       int maxDoc = reader.MaxDoc();
+                                       docMap = new int[maxDoc];
+                                       int j = 0;
+                                       for (int i = 0; i < maxDoc; i++)
+                                       {
+                                               if (reader.IsDeleted(i))
+                                               {
+                                                       delCount++;
+                                                       docMap[i] = - 1;
+                                               }
+                                               else
+                                                       docMap[i] = j++;
+                                       }
+                               }
+                       }
+                       return docMap;
+               }
+               
+               internal TermPositions GetPositions()
+               {
+                       if (postings == null)
+                       {
+                               postings = reader.TermPositions();
+                       }
+                       return postings;
+               }
+               
+               internal bool Next()
+               {
+                       if (termEnum.Next())
+                       {
+                               term = termEnum.Term();
+                               return true;
+                       }
+                       else
+                       {
+                               term = null;
+                               return false;
+                       }
+               }
+               
+               internal void  Close()
+               {
+                       termEnum.Close();
+                       if (postings != null)
+                       {
+                               postings.Close();
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentMergeQueue.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentMergeQueue.cs
new file mode 100644 (file)
index 0000000..edab213
--- /dev/null
@@ -0,0 +1,49 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using PriorityQueue = Mono.Lucene.Net.Util.PriorityQueue;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class SegmentMergeQueue:PriorityQueue
+       {
+               internal SegmentMergeQueue(int size)
+               {
+                       Initialize(size);
+               }
+               
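+               // Orders merge states primarily by term; ties are broken by segment
+               // base so that documents from earlier segments come first.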
+               public override bool LessThan(System.Object a, System.Object b)
+               {
+                       SegmentMergeInfo stiA = (SegmentMergeInfo) a;
+                       SegmentMergeInfo stiB = (SegmentMergeInfo) b;
+                       int comparison = stiA.term.CompareTo(stiB.term);
+                       if (comparison == 0)
+                               return stiA.base_Renamed < stiB.base_Renamed;
+                       else
+                               return comparison < 0;
+               }
+               
+               internal void  Close()
+               {
+                       while (Top() != null)
+                               ((SegmentMergeInfo) Pop()).Close();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentMerger.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentMerger.cs
new file mode 100644 (file)
index 0000000..1b1a163
--- /dev/null
@@ -0,0 +1,971 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Document = Mono.Lucene.Net.Documents.Document;
+using FieldSelector = Mono.Lucene.Net.Documents.FieldSelector;
+using FieldSelectorResult = Mono.Lucene.Net.Documents.FieldSelectorResult;
+using FieldOption = Mono.Lucene.Net.Index.IndexReader.FieldOption;
+using MergeAbortedException = Mono.Lucene.Net.Index.MergePolicy.MergeAbortedException;
+using Directory = Mono.Lucene.Net.Store.Directory;
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> The SegmentMerger class combines two or more Segments, represented by an IndexReader ({@link #add}),
+       /// into a single Segment.  After adding the appropriate readers, call the merge method to combine the 
+       /// segments.
+       /// <p/> 
+       /// If the compoundFile flag is set, then the segments will be merged into a compound file.
+       /// 
+       /// 
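+       /// A minimal usage sketch (hypothetical <c>dir</c>, <c>reader1</c> and
+       /// <c>reader2</c> variables):
+       /// <code>
+       /// SegmentMerger merger = new SegmentMerger(dir, "newSegmentName");
+       /// merger.Add(reader1);
+       /// merger.Add(reader2);
+       /// int mergedDocCount = merger.Merge(); // merges fields, terms, norms and vectors
+       /// merger.CloseReaders();
+       /// </code>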
+       /// </summary>
+       /// <seealso cref="merge">
+       /// </seealso>
+       /// <seealso cref="add">
+       /// </seealso>
+       public sealed class SegmentMerger
+       {
+               private class AnonymousClassCheckAbort:CheckAbort
+               {
+                       private void  InitBlock(SegmentMerger enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private SegmentMerger enclosingInstance;
+                       public SegmentMerger Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal AnonymousClassCheckAbort(SegmentMerger enclosingInstance, Mono.Lucene.Net.Index.MergePolicy.OneMerge Param1, Mono.Lucene.Net.Store.Directory Param2):base(Param1, Param2)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       public override void  Work(double units)
+                       {
+                               // do nothing
+                       }
+               }
+               private class AnonymousClassCheckAbort1:CheckAbort
+               {
+                       private void  InitBlock(SegmentMerger enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private SegmentMerger enclosingInstance;
+                       public SegmentMerger Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal AnonymousClassCheckAbort1(SegmentMerger enclosingInstance, Mono.Lucene.Net.Index.MergePolicy.OneMerge Param1, Mono.Lucene.Net.Store.Directory Param2):base(Param1, Param2)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       public override void  Work(double units)
+                       {
+                               // do nothing
+                       }
+               }
+               [Serializable]
+               private class AnonymousClassFieldSelector : FieldSelector
+               {
+                       public AnonymousClassFieldSelector(SegmentMerger enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(SegmentMerger enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private SegmentMerger enclosingInstance;
+                       public SegmentMerger Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       public FieldSelectorResult Accept(System.String fieldName)
+                       {
+                               return FieldSelectorResult.LOAD_FOR_MERGE;
+                       }
+               }
+               private void  InitBlock()
+               {
+                       termIndexInterval = IndexWriter.DEFAULT_TERM_INDEX_INTERVAL;
+               }
+               
+               /// <summary>norms header placeholder </summary>
+               internal static readonly byte[] NORMS_HEADER = new byte[]{(byte) 'N', (byte) 'R', (byte) 'M', unchecked((byte) - 1)};
+               
+               private Directory directory;
+               private System.String segment;
+               private int termIndexInterval;
+               
+               private System.Collections.IList readers = new System.Collections.ArrayList();
+               private FieldInfos fieldInfos;
+               
+               private int mergedDocs;
+               
+               private CheckAbort checkAbort;
+               
+               // Whether we should merge doc stores (stored fields and
+               // vectors files).  When all segments we are merging
+               // already share the same doc store files, we don't need
+               // to merge the doc stores.
+               private bool mergeDocStores;
+               
+               /// <summary>Maximum number of contiguous documents to bulk-copy
+               /// when merging stored fields 
+               /// </summary>
+               private const int MAX_RAW_MERGE_DOCS = 4192;
+               
+               /// <summary>This ctor is used only by test code.
+               /// 
+               /// </summary>
+               /// <param name="dir">The Directory to merge the other segments into
+               /// </param>
+               /// <param name="name">The name of the new segment
+               /// </param>
+               public /*internal*/ SegmentMerger(Directory dir, System.String name)
+               {
+                       InitBlock();
+                       directory = dir;
+                       segment = name;
+                       checkAbort = new AnonymousClassCheckAbort(this, null, null);
+               }
+               
+               internal SegmentMerger(IndexWriter writer, System.String name, MergePolicy.OneMerge merge)
+               {
+                       InitBlock();
+                       directory = writer.GetDirectory();
+                       segment = name;
+                       if (merge != null)
+                       {
+                               checkAbort = new CheckAbort(merge, directory);
+                       }
+                       else
+                       {
+                               checkAbort = new AnonymousClassCheckAbort1(this, null, null);
+                       }
+                       termIndexInterval = writer.GetTermIndexInterval();
+               }
+               
+               internal bool HasProx()
+               {
+                       return fieldInfos.HasProx();
+               }
+               
+               /// <summary> Add an IndexReader to the collection of readers that are to be merged</summary>
+               /// <param name="reader">
+               /// </param>
+               public /*internal*/ void  Add(IndexReader reader)
+               {
+                       readers.Add(reader);
+               }
+               
+               /// <summary> </summary>
+               /// <param name="i">The index of the reader to return
+               /// </param>
+               /// <returns> The ith reader to be merged
+               /// </returns>
+               internal IndexReader SegmentReader(int i)
+               {
+                       return (IndexReader) readers[i];
+               }
+               
+               /// <summary> Merges the readers specified by the {@link #add} method into the directory passed to the constructor</summary>
+               /// <returns> The number of documents that were merged
+               /// </returns>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public /*internal*/ int Merge()
+               {
+                       return Merge(true);
+               }
+               
+               /// <summary> Merges the readers specified by the {@link #add} method
+               /// into the directory passed to the constructor.
+               /// </summary>
+               /// <param name="mergeDocStores">if false, we will not merge the
+               /// stored fields nor vectors files
+               /// </param>
+               /// <returns> The number of documents that were merged
+               /// </returns>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               internal int Merge(bool mergeDocStores)
+               {
+                       
+                       this.mergeDocStores = mergeDocStores;
+                       
+                       // NOTE: it's important to add calls to
+                       // checkAbort.work(...) if you make any changes to this
+                       // method that will spend a lot of time.  The frequency
+                       // of this check impacts how long
+                       // IndexWriter.close(false) takes to actually stop the
+                       // threads.
+                       
+                       mergedDocs = MergeFields();
+                       MergeTerms();
+                       MergeNorms();
+                       
+                       if (mergeDocStores && fieldInfos.HasVectors())
+                               MergeVectors();
+                       
+                       return mergedDocs;
+               }
+               
+               /// <summary> close all IndexReaders that have been added.
+               /// Should not be called before merge().
+               /// </summary>
+               /// <throws>  IOException </throws>
+               public /*internal*/ void  CloseReaders()
+               {
+                       for (System.Collections.IEnumerator iter = readers.GetEnumerator(); iter.MoveNext(); )
+                       {
+                               ((IndexReader) iter.Current).Close();
+                       }
+               }
+
+        public /*internal*/ System.Collections.Generic.ICollection<string> GetMergedFiles()
+               {
+            System.Collections.Generic.IDictionary<string,string> fileSet = new System.Collections.Generic.Dictionary<string,string>();
+                       
+                       // Basic files
+                       for (int i = 0; i < IndexFileNames.COMPOUND_EXTENSIONS.Length; i++)
+                       {
+                               System.String ext = IndexFileNames.COMPOUND_EXTENSIONS[i];
+                               
+                               if (ext.Equals(IndexFileNames.PROX_EXTENSION) && !HasProx())
+                                       continue;
+                               
+                               if (mergeDocStores || (!ext.Equals(IndexFileNames.FIELDS_EXTENSION) && !ext.Equals(IndexFileNames.FIELDS_INDEX_EXTENSION)))
+                    fileSet[segment + "." + ext] = segment + "." + ext;
+                       }
+                       
+                       // Fieldable norm files
+                       for (int i = 0; i < fieldInfos.Size(); i++)
+                       {
+                               FieldInfo fi = fieldInfos.FieldInfo(i);
+                               if (fi.isIndexed && !fi.omitNorms)
+                               {
+                    fileSet[segment + "." + IndexFileNames.NORMS_EXTENSION]=segment + "." + IndexFileNames.NORMS_EXTENSION;
+                                       break;
+                               }
+                       }
+                       
+                       // Vector files
+                       if (fieldInfos.HasVectors() && mergeDocStores)
+                       {
+                               for (int i = 0; i < IndexFileNames.VECTOR_EXTENSIONS.Length; i++)
+                               {
+                    fileSet[segment + "." + IndexFileNames.VECTOR_EXTENSIONS[i]] = segment + "." + IndexFileNames.VECTOR_EXTENSIONS[i];
+                               }
+                       }
+
+            return fileSet.Keys;
+        }
+
+        public /*internal*/ System.Collections.Generic.ICollection<string> CreateCompoundFile(System.String fileName)
+        {
+            System.Collections.Generic.ICollection<string> files = GetMergedFiles();
+            CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, fileName, checkAbort);
+
+                       // Now merge all added files
+                       System.Collections.IEnumerator it = files.GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               cfsWriter.AddFile((System.String) it.Current);
+                       }
+                       
+                       // Perform the merge
+                       cfsWriter.Close();
+
+            return files;
+               }
+
+        private void AddIndexed(IndexReader reader, FieldInfos fInfos, System.Collections.Generic.ICollection<string> names, bool storeTermVectors, bool storePositionWithTermVector, bool storeOffsetWithTermVector, bool storePayloads, bool omitTFAndPositions)
+               {
+                       System.Collections.Generic.IEnumerator<string> i = names.GetEnumerator();
+                       while (i.MoveNext())
+                       {
+                System.String field = i.Current;
+                               fInfos.Add(field, true, storeTermVectors, storePositionWithTermVector, storeOffsetWithTermVector, !reader.HasNorms(field), storePayloads, omitTFAndPositions);
+                       }
+               }
+               
+               private SegmentReader[] matchingSegmentReaders;
+               private int[] rawDocLengths;
+               private int[] rawDocLengths2;
+               
+               private void  SetMatchingSegmentReaders()
+               {
+                       // If the i'th reader is a SegmentReader and has
+                       // identical fieldName -> number mapping, then this
+                       // array will be non-null at position i:
+                       int numReaders = readers.Count;
+                       matchingSegmentReaders = new SegmentReader[numReaders];
+                       
+                       // If this reader is a SegmentReader, and all of its
+                       // field name -> number mappings match the "merged"
+                       // FieldInfos, then we can do a bulk copy of the
+                       // stored fields:
+                       for (int i = 0; i < numReaders; i++)
+                       {
+                               IndexReader reader = (IndexReader) readers[i];
+                               if (reader is SegmentReader)
+                               {
+                                       SegmentReader segmentReader = (SegmentReader) reader;
+                                       bool same = true;
+                                       FieldInfos segmentFieldInfos = segmentReader.FieldInfos();
+                                       int numFieldInfos = segmentFieldInfos.Size();
+                                       for (int j = 0; same && j < numFieldInfos; j++)
+                                       {
+                                               same = fieldInfos.FieldName(j).Equals(segmentFieldInfos.FieldName(j));
+                                       }
+                                       if (same)
+                                       {
+                                               matchingSegmentReaders[i] = segmentReader;
+                                       }
+                               }
+                       }
+                       
+                       // Used for bulk-reading raw bytes for stored fields
+                       rawDocLengths = new int[MAX_RAW_MERGE_DOCS];
+                       rawDocLengths2 = new int[MAX_RAW_MERGE_DOCS];
+               }
+               
+               /// <summary> </summary>
+               /// <returns> The number of documents in all of the readers
+               /// </returns>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               private int MergeFields()
+               {
+                       
+                       if (!mergeDocStores)
+                       {
+                               // When we are not merging by doc stores, that means
+                               // all segments were written as part of a single
+                               // autoCommit=false IndexWriter session, so their field
+                               // name -> number mappings are the same.  So, we start
+                               // with the fieldInfos of the last segment in this
+                               // case, to keep that numbering.
+                               SegmentReader sr = (SegmentReader) readers[readers.Count - 1];
+                               fieldInfos = (FieldInfos) sr.core.fieldInfos.Clone();
+                       }
+                       else
+                       {
+                               fieldInfos = new FieldInfos(); // merge field names
+                       }
+                       
+                       for (System.Collections.IEnumerator iter = readers.GetEnumerator(); iter.MoveNext(); )
+                       {
+                               IndexReader reader = (IndexReader) iter.Current;
+                               if (reader is SegmentReader)
+                               {
+                                       SegmentReader segmentReader = (SegmentReader) reader;
+                                       FieldInfos readerFieldInfos = segmentReader.FieldInfos();
+                                       int numReaderFieldInfos = readerFieldInfos.Size();
+                                       for (int j = 0; j < numReaderFieldInfos; j++)
+                                       {
+                                               FieldInfo fi = readerFieldInfos.FieldInfo(j);
+                                               fieldInfos.Add(fi.name, fi.isIndexed, fi.storeTermVector, fi.storePositionWithTermVector, fi.storeOffsetWithTermVector, !reader.HasNorms(fi.name), fi.storePayloads, fi.omitTermFreqAndPositions);
+                                       }
+                               }
+                               else
+                               {
+                                       AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR_WITH_POSITION_OFFSET), true, true, true, false, false);
+                                       AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR_WITH_POSITION), true, true, false, false, false);
+                                       AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR_WITH_OFFSET), true, false, true, false, false);
+                                       AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.TERMVECTOR), true, false, false, false, false);
+                                       AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.OMIT_TERM_FREQ_AND_POSITIONS), false, false, false, false, true);
+                                       AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.STORES_PAYLOADS), false, false, false, true, false);
+                                       AddIndexed(reader, fieldInfos, reader.GetFieldNames(FieldOption.INDEXED), false, false, false, false, false);
+                                       fieldInfos.Add(reader.GetFieldNames(FieldOption.UNINDEXED), false);
+                               }
+                       }
+                       fieldInfos.Write(directory, segment + ".fnm");
+                       
+                       int docCount = 0;
+                       
+                       SetMatchingSegmentReaders();
+                       
+                       if (mergeDocStores)
+                       {
+                               
+                               // for merging we don't want to compress/uncompress the data, so to tell the FieldsReader that we're
+                               // in merge mode, we use this FieldSelector
+                               FieldSelector fieldSelectorMerge = new AnonymousClassFieldSelector(this);
+                               
+                               // merge field values
+                               FieldsWriter fieldsWriter = new FieldsWriter(directory, segment, fieldInfos);
+                               
+                               try
+                               {
+                                       int idx = 0;
+                                       for (System.Collections.IEnumerator iter = readers.GetEnumerator(); iter.MoveNext(); )
+                                       {
+                                               IndexReader reader = (IndexReader) iter.Current;
+                                               SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
+                                               FieldsReader matchingFieldsReader = null;
+                                               if (matchingSegmentReader != null)
+                                               {
+                                                       FieldsReader fieldsReader = matchingSegmentReader.GetFieldsReader();
+                                                       if (fieldsReader != null && fieldsReader.CanReadRawDocs())
+                                                       {
+                                                               matchingFieldsReader = fieldsReader;
+                                                       }
+                                               }
+                                               if (reader.HasDeletions())
+                                               {
+                                                       docCount += CopyFieldsWithDeletions(fieldSelectorMerge, fieldsWriter, reader, matchingFieldsReader);
+                                               }
+                                               else
+                                               {
+                                                       docCount += CopyFieldsNoDeletions(fieldSelectorMerge, fieldsWriter, reader, matchingFieldsReader);
+                                               }
+                                       }
+                               }
+                               finally
+                               {
+                                       fieldsWriter.Close();
+                               }
+                               
+                               System.String fileName = segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION;
+                               long fdxFileLength = directory.FileLength(fileName);
+                               
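+                               // The fdx file is a 4-byte header followed by one 8-byte
+                               // pointer per document, hence the expected length below.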
+                               // This is most likely a bug in Sun JRE 1.6.0_04/_05;
+                               // we detect that the bug has struck, here, and
+                               // throw an exception to prevent the corruption from
+                               // entering the index.  See LUCENE-1282 for
+                               // details.
+                               if (4 + ((long) docCount) * 8 != fdxFileLength)
+                                       throw new System.SystemException("mergeFields produced an invalid result: docCount is " + docCount + " but fdx file size is " + fdxFileLength + " file=" + fileName + " file exists?=" + directory.FileExists(fileName) + "; now aborting this merge to prevent index corruption");
+                       }
+                       // If we are skipping the doc stores, that means there
+                       // are no deletions in any of these segments, so we
+                       // just sum numDocs() of each segment to get total docCount
+                       else
+                       {
+                               for (System.Collections.IEnumerator iter = readers.GetEnumerator(); iter.MoveNext(); )
+                               {
+                                       docCount += ((IndexReader) iter.Current).NumDocs();
+                               }
+                       }
+                       
+                       return docCount;
+               }
+               
+               private int CopyFieldsWithDeletions(FieldSelector fieldSelectorMerge, FieldsWriter fieldsWriter, IndexReader reader, FieldsReader matchingFieldsReader)
+               {
+                       int docCount = 0;
+                       int maxDoc = reader.MaxDoc();
+                       if (matchingFieldsReader != null)
+                       {
+                               // We can bulk-copy because the fieldInfos are "congruent"
+                               for (int j = 0; j < maxDoc; )
+                               {
+                                       if (reader.IsDeleted(j))
+                                       {
+                                               // skip deleted docs
+                                               ++j;
+                                               continue;
+                                       }
+                                       // We can optimize this case (doing a bulk byte copy) since the field 
+                                       // numbers are identical
+                                       int start = j, numDocs = 0;
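+                                       // gather the longest contiguous run of live docs (capped at
+                                       // MAX_RAW_MERGE_DOCS) so their raw bytes can be copied in one call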
+                                       do 
+                                       {
+                                               j++;
+                                               numDocs++;
+                                               if (j >= maxDoc)
+                                                       break;
+                                               if (reader.IsDeleted(j))
+                                               {
+                                                       j++;
+                                                       break;
+                                               }
+                                       }
+                                       while (numDocs < MAX_RAW_MERGE_DOCS);
+                                       
+                                       IndexInput stream = matchingFieldsReader.RawDocs(rawDocLengths, start, numDocs);
+                                       fieldsWriter.AddRawDocuments(stream, rawDocLengths, numDocs);
+                                       docCount += numDocs;
+                                       checkAbort.Work(300 * numDocs);
+                               }
+                       }
+                       else
+                       {
+                               for (int j = 0; j < maxDoc; j++)
+                               {
+                                       if (reader.IsDeleted(j))
+                                       {
+                                               // skip deleted docs
+                                               continue;
+                                       }
+                                       // NOTE: it's very important to first assign to doc then pass it to
+                                       // termVectorsWriter.addAllDocVectors; see LUCENE-1282
+                                       Document doc = reader.Document(j, fieldSelectorMerge);
+                                       fieldsWriter.AddDocument(doc);
+                                       docCount++;
+                                       checkAbort.Work(300);
+                               }
+                       }
+                       return docCount;
+               }
+               
+               private int CopyFieldsNoDeletions(FieldSelector fieldSelectorMerge, FieldsWriter fieldsWriter, IndexReader reader, FieldsReader matchingFieldsReader)
+               {
+                       int maxDoc = reader.MaxDoc();
+                       int docCount = 0;
+                       if (matchingFieldsReader != null)
+                       {
+                               // We can bulk-copy because the fieldInfos are "congruent"
+                               while (docCount < maxDoc)
+                               {
+                                       int len = System.Math.Min(MAX_RAW_MERGE_DOCS, maxDoc - docCount);
+                                       IndexInput stream = matchingFieldsReader.RawDocs(rawDocLengths, docCount, len);
+                                       fieldsWriter.AddRawDocuments(stream, rawDocLengths, len);
+                                       docCount += len;
+                                       checkAbort.Work(300 * len);
+                               }
+                       }
+                       else
+                       {
+                               for (; docCount < maxDoc; docCount++)
+                               {
+                                       // NOTE: it's very important to first assign to doc then pass it to
+                                       // termVectorsWriter.addAllDocVectors; see LUCENE-1282
+                                       Document doc = reader.Document(docCount, fieldSelectorMerge);
+                                       fieldsWriter.AddDocument(doc);
+                                       checkAbort.Work(300);
+                               }
+                       }
+                       return docCount;
+               }
+               
+               /// <summary> Merge the TermVectors from each of the segments into the new one.</summary>
+               /// <throws>  IOException </throws>
+               private void  MergeVectors()
+               {
+                       TermVectorsWriter termVectorsWriter = new TermVectorsWriter(directory, segment, fieldInfos);
+                       
+                       try
+                       {
+                               int idx = 0;
+                               for (System.Collections.IEnumerator iter = readers.GetEnumerator(); iter.MoveNext(); )
+                               {
+                                       SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
+                                       TermVectorsReader matchingVectorsReader = null;
+                                       if (matchingSegmentReader != null)
+                                       {
+                                               TermVectorsReader vectorsReader = matchingSegmentReader.GetTermVectorsReaderOrig();
+                                               
+                                               // If the TV* files are an older format then they cannot read raw docs:
+                                               if (vectorsReader != null && vectorsReader.CanReadRawDocs())
+                                               {
+                                                       matchingVectorsReader = vectorsReader;
+                                               }
+                                       }
+                                       IndexReader reader = (IndexReader) iter.Current;
+                                       if (reader.HasDeletions())
+                                       {
+                                               CopyVectorsWithDeletions(termVectorsWriter, matchingVectorsReader, reader);
+                                       }
+                                       else
+                                       {
+                                               CopyVectorsNoDeletions(termVectorsWriter, matchingVectorsReader, reader);
+                                       }
+                               }
+                       }
+                       finally
+                       {
+                               termVectorsWriter.Close();
+                       }
+                       
+                       System.String fileName = segment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION;
+                       long tvxSize = directory.FileLength(fileName);
+                       
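+                       // The tvx file is a 4-byte header followed by 16 bytes per
+                       // document, hence the expected length below.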
+                       // This is most likely a bug in Sun JRE 1.6.0_04/_05;
+                       // we detect that the bug has struck, here, and
+                       // throw an exception to prevent the corruption from
+                       // entering the index.  See LUCENE-1282 for
+                       // details.
+                       if (4 + ((long) mergedDocs) * 16 != tvxSize)
+                               throw new System.SystemException("mergeVectors produced an invalid result: mergedDocs is " + mergedDocs + " but tvx size is " + tvxSize + " file=" + fileName + " file exists?=" + directory.FileExists(fileName) + "; now aborting this merge to prevent index corruption");
+               }
+               
+               private void  CopyVectorsWithDeletions(TermVectorsWriter termVectorsWriter, TermVectorsReader matchingVectorsReader, IndexReader reader)
+               {
+                       int maxDoc = reader.MaxDoc();
+                       if (matchingVectorsReader != null)
+                       {
+                               // We can bulk-copy because the fieldInfos are "congruent"
+                               for (int docNum = 0; docNum < maxDoc; )
+                               {
+                                       if (reader.IsDeleted(docNum))
+                                       {
+                                               // skip deleted docs
+                                               ++docNum;
+                                               continue;
+                                       }
+                                       // We can optimize this case (doing a bulk byte copy) since the field 
+                                       // numbers are identical
+                                       int start = docNum, numDocs = 0;
+                                       do 
+                                       {
+                                               docNum++;
+                                               numDocs++;
+                                               if (docNum >= maxDoc)
+                                                       break;
+                                               if (reader.IsDeleted(docNum))
+                                               {
+                                                       docNum++;
+                                                       break;
+                                               }
+                                       }
+                                       while (numDocs < MAX_RAW_MERGE_DOCS);
+                                       
+                                       matchingVectorsReader.RawDocs(rawDocLengths, rawDocLengths2, start, numDocs);
+                                       termVectorsWriter.AddRawDocuments(matchingVectorsReader, rawDocLengths, rawDocLengths2, numDocs);
+                                       checkAbort.Work(300 * numDocs);
+                               }
+                       }
+                       else
+                       {
+                               for (int docNum = 0; docNum < maxDoc; docNum++)
+                               {
+                                       if (reader.IsDeleted(docNum))
+                                       {
+                                               // skip deleted docs
+                                               continue;
+                                       }
+                                       
+                                       // NOTE: it's very important to first assign to vectors then pass it to
+                                       // termVectorsWriter.addAllDocVectors; see LUCENE-1282
+                                       TermFreqVector[] vectors = reader.GetTermFreqVectors(docNum);
+                                       termVectorsWriter.AddAllDocVectors(vectors);
+                                       checkAbort.Work(300);
+                               }
+                       }
+               }
+               
+               private void  CopyVectorsNoDeletions(TermVectorsWriter termVectorsWriter, TermVectorsReader matchingVectorsReader, IndexReader reader)
+               {
+                       int maxDoc = reader.MaxDoc();
+                       if (matchingVectorsReader != null)
+                       {
+                               // We can bulk-copy because the fieldInfos are "congruent"
+                               int docCount = 0;
+                               while (docCount < maxDoc)
+                               {
+                                       int len = System.Math.Min(MAX_RAW_MERGE_DOCS, maxDoc - docCount);
+                                       matchingVectorsReader.RawDocs(rawDocLengths, rawDocLengths2, docCount, len);
+                                       termVectorsWriter.AddRawDocuments(matchingVectorsReader, rawDocLengths, rawDocLengths2, len);
+                                       docCount += len;
+                                       checkAbort.Work(300 * len);
+                               }
+                       }
+                       else
+                       {
+                               for (int docNum = 0; docNum < maxDoc; docNum++)
+                               {
+                                       // NOTE: it's very important to first assign to vectors and then pass it to
+                                       // termVectorsWriter.AddAllDocVectors; see LUCENE-1282
+                                       TermFreqVector[] vectors = reader.GetTermFreqVectors(docNum);
+                                       termVectorsWriter.AddAllDocVectors(vectors);
+                                       checkAbort.Work(300);
+                               }
+                       }
+               }
+               
+               private SegmentMergeQueue queue = null;
+               
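+               /// <summary>Merges the term dictionaries and postings of all readers into
+               /// the new segment, walking terms in sorted order via a SegmentMergeQueue.</summary>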
+               private void  MergeTerms()
+               {
+                       
+                       SegmentWriteState state = new SegmentWriteState(null, directory, segment, null, mergedDocs, 0, termIndexInterval);
+                       
+                       FormatPostingsFieldsConsumer consumer = new FormatPostingsFieldsWriter(state, fieldInfos);
+                       
+                       try
+                       {
+                               queue = new SegmentMergeQueue(readers.Count);
+                               
+                               MergeTermInfos(consumer);
+                       }
+                       finally
+                       {
+                               consumer.Finish();
+                               if (queue != null)
+                                       queue.Close();
+                       }
+               }
+               
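+               // Whether the field currently being merged omits term frequencies and
+               // positions; updated per-field in MergeTermInfos and read in AppendPostings.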
+               internal bool omitTermFreqAndPositions;
+               
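+               // k-way merge: each reader contributes a SegmentMergeInfo positioned on its
+               // current term; all readers sharing the smallest term are popped together so
+               // their postings can be appended as one merged entry.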
+               private void  MergeTermInfos(FormatPostingsFieldsConsumer consumer)
+               {
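+                       // Seed the merge queue with one SegmentMergeInfo per reader, recording
+                       // each reader's document-number offset (base) within the merged segment.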
+                       int base_Renamed = 0;
+                       int readerCount = readers.Count;
+                       for (int i = 0; i < readerCount; i++)
+                       {
+                               IndexReader reader = (IndexReader) readers[i];
+                               TermEnum termEnum = reader.Terms();
+                               SegmentMergeInfo smi = new SegmentMergeInfo(base_Renamed, termEnum, reader);
+                               int[] docMap = smi.GetDocMap();
+                               if (docMap != null)
+                               {
+                                       if (docMaps == null)
+                                       {
+                                               docMaps = new int[readerCount][];
+                                               delCounts = new int[readerCount];
+                                       }
+                                       docMaps[i] = docMap;
+                                       delCounts[i] = smi.reader.MaxDoc() - smi.reader.NumDocs();
+                               }
+                               
+                               base_Renamed += reader.NumDocs();
+                               
+                               System.Diagnostics.Debug.Assert(reader.NumDocs() == reader.MaxDoc() - smi.delCount);
+                               
+                               if (smi.Next())
+                                       queue.Add(smi); // initialize queue
+                               else
+                                       smi.Close();
+                       }
+                       
+                       SegmentMergeInfo[] match = new SegmentMergeInfo[readers.Count];
+                       
+                       System.String currentField = null;
+                       FormatPostingsTermsConsumer termsConsumer = null;
+                       
+                       while (queue.Size() > 0)
+                       {
+                               int matchSize = 0; // pop matching terms
+                               match[matchSize++] = (SegmentMergeInfo) queue.Pop();
+                               Term term = match[0].term;
+                               SegmentMergeInfo top = (SegmentMergeInfo) queue.Top();
+                               
+                               while (top != null && term.CompareTo(top.term) == 0)
+                               {
+                                       match[matchSize++] = (SegmentMergeInfo) queue.Pop();
+                                       top = (SegmentMergeInfo) queue.Top();
+                               }
+                               
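+                               // Reference equality is used here because field-name strings are
+                               // interned; a different reference therefore means a new field.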
+                               if ((System.Object) currentField != (System.Object) term.field)
+                               {
+                                       currentField = term.field;
+                                       if (termsConsumer != null)
+                                               termsConsumer.Finish();
+                                       FieldInfo fieldInfo = fieldInfos.FieldInfo(currentField);
+                                       termsConsumer = consumer.AddField(fieldInfo);
+                                       omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
+                               }
+                               
+                               int df = AppendPostings(termsConsumer, match, matchSize); // add new TermInfo
+                               
+                               checkAbort.Work(df / 3.0);
+                               
+                               while (matchSize > 0)
+                               {
+                                       SegmentMergeInfo smi = match[--matchSize];
+                                       if (smi.Next())
+                                               queue.Add(smi); // restore queue
+                                       else
+                                               smi.Close(); // done with a segment
+                               }
+                       }
+               }
+               
+               private byte[] payloadBuffer;
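+               // docMaps[i] remaps reader i's document numbers around its deletions;
+               // delCounts[i] is the number of deleted documents in reader i.  Both
+               // stay null when no reader has deletions.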
+               private int[][] docMaps;
+               internal int[][] GetDocMaps()
+               {
+                       return docMaps;
+               }
+               private int[] delCounts;
+               internal int[] GetDelCounts()
+               {
+                       return delCounts;
+               }
+               
+               /// <summary>Process postings from multiple segments that are all positioned on
+               /// the same term, writing the merged entries to the freq and prox
+               /// output streams.
+               /// </summary>
+               /// <param name="termsConsumer">consumer that receives the merged postings for the term
+               /// </param>
+               /// <param name="smis">array of segments positioned on the term
+               /// </param>
+               /// <param name="n">number of cells in the array actually occupied
+               /// </param>
+               /// <returns> number of documents across all segments in which this term was found
+               /// </returns>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               private int AppendPostings(FormatPostingsTermsConsumer termsConsumer, SegmentMergeInfo[] smis, int n)
+               {
+                       
+                       FormatPostingsDocsConsumer docConsumer = termsConsumer.AddTerm(smis[0].term.text);
+                       int df = 0;
+                       for (int i = 0; i < n; i++)
+                       {
+                               SegmentMergeInfo smi = smis[i];
+                               TermPositions postings = smi.GetPositions();
+                               System.Diagnostics.Debug.Assert(postings != null);
+                               int base_Renamed = smi.base_Renamed;
+                               int[] docMap = smi.GetDocMap();
+                               postings.Seek(smi.termEnum);
+                               
+                               while (postings.Next())
+                               {
+                                       df++;
+                                       int doc = postings.Doc();
+                                       if (docMap != null)
+                                               doc = docMap[doc]; // map around deletions
+                                       doc += base_Renamed; // convert to merged space
+                                       
+                                       int freq = postings.Freq();
+                                       FormatPostingsPositionsConsumer posConsumer = docConsumer.AddDoc(doc, freq);
+                                       
+                                       if (!omitTermFreqAndPositions)
+                                       {
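+                                               // Copy every position for this doc, along with its payload when one is present.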
+                                               for (int j = 0; j < freq; j++)
+                                               {
+                                                       int position = postings.NextPosition();
+                                                       int payloadLength = postings.GetPayloadLength();
+                                                       if (payloadLength > 0)
+                                                       {
+                                                               if (payloadBuffer == null || payloadBuffer.Length < payloadLength)
+                                                                       payloadBuffer = new byte[payloadLength];
+                                                               postings.GetPayload(payloadBuffer, 0);
+                                                       }
+                                                       posConsumer.AddPosition(position, payloadBuffer, 0, payloadLength);
+                                               }
+                                               posConsumer.Finish();
+                                       }
+                               }
+                       }
+                       docConsumer.Finish();
+                       
+                       return df;
+               }
+               
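+               /// <summary>Concatenates the norms of each indexed, norm-bearing field across
+               /// all readers into the merged segment's norms file, skipping bytes that
+               /// belong to deleted documents.</summary>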
+               private void  MergeNorms()
+               {
+                       byte[] normBuffer = null;
+                       IndexOutput output = null;
+                       try
+                       {
+                               int numFieldInfos = fieldInfos.Size();
+                               for (int i = 0; i < numFieldInfos; i++)
+                               {
+                                       FieldInfo fi = fieldInfos.FieldInfo(i);
+                                       if (fi.isIndexed && !fi.omitNorms)
+                                       {
+                                               if (output == null)
+                                               {
+                                                       output = directory.CreateOutput(segment + "." + IndexFileNames.NORMS_EXTENSION);
+                                                       output.WriteBytes(NORMS_HEADER, NORMS_HEADER.Length);
+                                               }
+                                               for (System.Collections.IEnumerator iter = readers.GetEnumerator(); iter.MoveNext(); )
+                                               {
+                                                       IndexReader reader = (IndexReader) iter.Current;
+                                                       int maxDoc = reader.MaxDoc();
+                                                       if (normBuffer == null || normBuffer.Length < maxDoc)
+                                                       {
+                                                               // the buffer is too small for the current segment
+                                                               normBuffer = new byte[maxDoc];
+                                                       }
+                                                       reader.Norms(fi.name, normBuffer, 0);
+                                                       if (!reader.HasDeletions())
+                                                       {
+                                                               // optimized case for segments without deleted docs
+                                                               output.WriteBytes(normBuffer, maxDoc);
+                                                       }
+                                                       else
+                                                       {
+                                                               // this segment has deleted docs, so we have to
+                                                               // check for every doc if it is deleted or not
+                                                               for (int k = 0; k < maxDoc; k++)
+                                                               {
+                                                                       if (!reader.IsDeleted(k))
+                                                                       {
+                                                                               output.WriteByte(normBuffer[k]);
+                                                                       }
+                                                               }
+                                                       }
+                                                       checkAbort.Work(maxDoc);
+                                               }
+                                       }
+                               }
+                       }
+                       finally
+                       {
+                               if (output != null)
+                               {
+                                       output.Close();
+                               }
+                       }
+               }
+               
+               internal class CheckAbort
+               {
+                       private double workCount;
+                       private MergePolicy.OneMerge merge;
+                       private Directory dir;
+                       public CheckAbort(MergePolicy.OneMerge merge, Directory dir)
+                       {
+                               this.merge = merge;
+                               this.dir = dir;
+                       }
+                       
+                       /// <summary> Records that roughly <c>units</c> units of work
+                       /// have been done since this method was last called.
+                       /// When adding time-consuming code to SegmentMerger,
+                       /// you should test different values for units to ensure
+                       /// that the time between calls to merge.CheckAborted
+                       /// is up to ~ 1 second.
+                       /// </summary>
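+                       /// <remarks>For example, SegmentMerger charges roughly 300 units per
+                       /// copied document (see the checkAbort.Work(300 * numDocs) calls above).</remarks>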
+                       public virtual void  Work(double units)
+                       {
+                               workCount += units;
+                               if (workCount >= 10000.0)
+                               {
+                                       merge.CheckAborted(dir);
+                                       workCount = 0;
+                               }
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentReader.cs
new file mode 100644 (file)
index 0000000..646b66f
--- /dev/null
@@ -0,0 +1,1847 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Document = Mono.Lucene.Net.Documents.Document;
+using FieldSelector = Mono.Lucene.Net.Documents.FieldSelector;
+using BufferedIndexInput = Mono.Lucene.Net.Store.BufferedIndexInput;
+using Directory = Mono.Lucene.Net.Store.Directory;
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+using BitVector = Mono.Lucene.Net.Util.BitVector;
+using CloseableThreadLocal = Mono.Lucene.Net.Util.CloseableThreadLocal;
+using DefaultSimilarity = Mono.Lucene.Net.Search.DefaultSimilarity;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <version>  $Id 
+       /// </version>
+       /// <summary> <p/><b>NOTE:</b> This API is new and still experimental
+       /// (subject to change suddenly in the next release)<p/>
+       /// </summary>
+       public class SegmentReader:IndexReader, System.ICloneable
+       {
+               public SegmentReader()
+               {
+                       InitBlock();
+               }
+               private void  InitBlock()
+               {
+                       fieldsReaderLocal = new FieldsReaderLocal(this);
+               }
+               protected internal bool readOnly;
+               
+               private SegmentInfo si;
+               private int readBufferSize;
+               
+               internal CloseableThreadLocal fieldsReaderLocal;
+               internal CloseableThreadLocal termVectorsLocal = new CloseableThreadLocal();
+               
+               internal BitVector deletedDocs = null;
+               internal Ref deletedDocsRef = null;
+               private bool deletedDocsDirty = false;
+               private bool normsDirty = false;
+               private int pendingDeleteCount;
+               
+               private bool rollbackHasChanges = false;
+               private bool rollbackDeletedDocsDirty = false;
+               private bool rollbackNormsDirty = false;
+               private SegmentInfo rollbackSegmentInfo;
+               private int rollbackPendingDeleteCount;
+               
+               // optionally used for the .nrm file shared by multiple norms
+               private IndexInput singleNormStream;
+               private Ref singleNormRef;
+               
+               internal CoreReaders core;
+               
+               // Holds core readers that are shared (unchanged) when
+               // SegmentReader is cloned or reopened
+               public /*internal*/ sealed class CoreReaders
+               {
+                       
+                       // Counts how many other readers share the core objects
+                       // (freqStream, proxStream, tis, etc.) of this reader;
+                       // when coreRef drops to 0, these core objects may be
+                       // closed.  A given instance of SegmentReader may be
+                       // closed, even though it shares core objects with other
+                       // SegmentReaders:
+                       private Ref ref_Renamed = new Ref();
+                       
+                       internal System.String segment;
+                       internal FieldInfos fieldInfos;
+                       internal IndexInput freqStream;
+                       internal IndexInput proxStream;
+                       internal TermInfosReader tisNoIndex;
+                       
+                       internal Directory dir;
+                       internal Directory cfsDir;
+                       internal int readBufferSize;
+                       internal int termsIndexDivisor;
+
+                       internal SegmentReader origInstance;
+                       
+                       internal TermInfosReader tis;
+                       internal FieldsReader fieldsReaderOrig;
+                       internal TermVectorsReader termVectorsReaderOrig;
+                       internal CompoundFileReader cfsReader;
+                       internal CompoundFileReader storeCFSReader;
+
+                       internal CoreReaders(SegmentReader origInstance, Directory dir, SegmentInfo si, int readBufferSize, int termsIndexDivisor)
+                       {
+                               segment = si.name;
+                               this.readBufferSize = readBufferSize;
+                               this.dir = dir;
+                               
+                               bool success = false;
+                               
+                               try
+                               {
+                                       Directory dir0 = dir;
+                                       if (si.GetUseCompoundFile())
+                                       {
+                                               cfsReader = new CompoundFileReader(dir, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
+                                               dir0 = cfsReader;
+                                       }
+                                       cfsDir = dir0;
+                                       
+                                       fieldInfos = new FieldInfos(cfsDir, segment + "." + IndexFileNames.FIELD_INFOS_EXTENSION);
+                                       
+                                       this.termsIndexDivisor = termsIndexDivisor;
+                                       TermInfosReader reader = new TermInfosReader(cfsDir, segment, fieldInfos, readBufferSize, termsIndexDivisor);
+                                       if (termsIndexDivisor == - 1)
+                                       {
+                                               tisNoIndex = reader;
+                                       }
+                                       else
+                                       {
+                                               tis = reader;
+                                               tisNoIndex = null;
+                                       }
+                                       
+                                       // make sure that all index files have been read or are kept open
+                                       // so that if an index update removes them we'll still have them
+                                       freqStream = cfsDir.OpenInput(segment + "." + IndexFileNames.FREQ_EXTENSION, readBufferSize);
+                                       
+                                       if (fieldInfos.HasProx())
+                                       {
+                                               proxStream = cfsDir.OpenInput(segment + "." + IndexFileNames.PROX_EXTENSION, readBufferSize);
+                                       }
+                                       else
+                                       {
+                                               proxStream = null;
+                                       }
+                                       success = true;
+                               }
+                               finally
+                               {
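+                                       // If anything above failed, release whatever has been opened so far.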
+                                       if (!success)
+                                       {
+                                               DecRef();
+                                       }
+                               }
+
+
+                // Must assign this at the end -- if we hit an
+                // exception above core, we don't want to attempt to
+                // purge the FieldCache (will hit NPE because core is
+                // not assigned yet).
+                this.origInstance = origInstance;
+                       }
+                       
+                       internal TermVectorsReader GetTermVectorsReaderOrig()
+                       {
+                               lock (this)
+                               {
+                                       return termVectorsReaderOrig;
+                               }
+                       }
+                       
+                       internal FieldsReader GetFieldsReaderOrig()
+                       {
+                               lock (this)
+                               {
+                                       return fieldsReaderOrig;
+                               }
+                       }
+                       
+                       internal void  IncRef()
+                       {
+                               lock (this)
+                               {
+                                       ref_Renamed.IncRef();
+                               }
+                       }
+                       
+                       internal Directory GetCFSReader()
+                       {
+                               lock (this)
+                               {
+                                       return cfsReader;
+                               }
+                       }
+                       
+                       internal TermInfosReader GetTermsReader()
+                       {
+                               lock (this)
+                               {
+                                       if (tis != null)
+                                       {
+                                               return tis;
+                                       }
+                                       else
+                                       {
+                                               return tisNoIndex;
+                                       }
+                               }
+                       }
+                       
+                       internal bool TermsIndexIsLoaded()
+                       {
+                               lock (this)
+                               {
+                                       return tis != null;
+                               }
+                       }
+                       
+                       // NOTE: only called from IndexWriter when a near
+                       // real-time reader is opened, or applyDeletes is run,
+                       // sharing a segment that's still being merged.  This
+                       // method is not fully thread safe, and relies on the
+                       // synchronization in IndexWriter
+                       internal void  LoadTermsIndex(SegmentInfo si, int termsIndexDivisor)
+                       {
+                               lock (this)
+                               {
+                                       if (tis == null)
+                                       {
+                                               Directory dir0;
+                                               if (si.GetUseCompoundFile())
+                                               {
+                                                       // In some cases we were originally opened when CFS
+                                                       // was not used, but we are then asked to open the
+                                                       // terms reader with index after the segment has
+                                                       // switched to CFS
+                                                       if (cfsReader == null)
+                                                       {
+                                                               cfsReader = new CompoundFileReader(dir, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
+                                                       }
+                                                       dir0 = cfsReader;
+                                               }
+                                               else
+                                               {
+                                                       dir0 = dir;
+                                               }
+                                               
+                                               tis = new TermInfosReader(dir0, segment, fieldInfos, readBufferSize, termsIndexDivisor);
+                                       }
+                               }
+                       }
+                       
+                       internal void  DecRef()
+                       {
+                               lock (this)
+                               {
+                                       
+                                       if (ref_Renamed.DecRef() == 0)
+                                       {
+                                               
+                                               // close everything, nothing is shared anymore with other readers
+                                               if (tis != null)
+                                               {
+                                                       tis.Close();
+                                                       // null so if an app hangs on to us we still free most ram
+                                                       tis = null;
+                                               }
+                                               
+                                               if (tisNoIndex != null)
+                                               {
+                                                       tisNoIndex.Close();
+                                               }
+                                               
+                                               if (freqStream != null)
+                                               {
+                                                       freqStream.Close();
+                                               }
+                                               
+                                               if (proxStream != null)
+                                               {
+                                                       proxStream.Close();
+                                               }
+                                               
+                                               if (termVectorsReaderOrig != null)
+                                               {
+                                                       termVectorsReaderOrig.Close();
+                                               }
+                                               
+                                               if (fieldsReaderOrig != null)
+                                               {
+                                                       fieldsReaderOrig.Close();
+                                               }
+                                               
+                                               if (cfsReader != null)
+                                               {
+                                                       cfsReader.Close();
+                                               }
+                                               
+                                               if (storeCFSReader != null)
+                                               {
+                                                       storeCFSReader.Close();
+                                               }
+
+                        // Force FieldCache to evict our entries at this point
+                        if (origInstance != null)
+                        {
+                            Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.Purge(origInstance);
+                        }
+                                       }
+                               }
+                       }
+                       
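+                       // Lazily opens the stored-fields reader (and the term-vectors reader when
+                       // the segment has vectors), resolving whether those files live in a shared
+                       // doc store, a compound file, or the plain directory.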
+                       internal void  OpenDocStores(SegmentInfo si)
+                       {
+                               lock (this)
+                               {
+                                       
+                                       System.Diagnostics.Debug.Assert(si.name.Equals(segment));
+                                       
+                                       if (fieldsReaderOrig == null)
+                                       {
+                                               Directory storeDir;
+                                               if (si.GetDocStoreOffset() != - 1)
+                                               {
+                                                       if (si.GetDocStoreIsCompoundFile())
+                                                       {
+                                                               System.Diagnostics.Debug.Assert(storeCFSReader == null);
+                                                               storeCFSReader = new CompoundFileReader(dir, si.GetDocStoreSegment() + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION, readBufferSize);
+                                                               storeDir = storeCFSReader;
+                                                               System.Diagnostics.Debug.Assert(storeDir != null);
+                                                       }
+                                                       else
+                                                       {
+                                                               storeDir = dir;
+                                                               System.Diagnostics.Debug.Assert(storeDir != null);
+                                                       }
+                                               }
+                                               else if (si.GetUseCompoundFile())
+                                               {
+                                                       // In some cases, we were originally opened when CFS
+                                                       // was not used, but then we are asked to open doc
+                                                       // stores after the segment has switched to CFS
+                                                       if (cfsReader == null)
+                                                       {
+                                                               cfsReader = new CompoundFileReader(dir, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
+                                                       }
+                                                       storeDir = cfsReader;
+                                                       System.Diagnostics.Debug.Assert(storeDir != null);
+                                               }
+                                               else
+                                               {
+                                                       storeDir = dir;
+                                                       System.Diagnostics.Debug.Assert(storeDir != null);
+                                               }
+                                               
+                                               System.String storesSegment;
+                                               if (si.GetDocStoreOffset() != - 1)
+                                               {
+                                                       storesSegment = si.GetDocStoreSegment();
+                                               }
+                                               else
+                                               {
+                                                       storesSegment = segment;
+                                               }
+                                               
+                                               fieldsReaderOrig = new FieldsReader(storeDir, storesSegment, fieldInfos, readBufferSize, si.GetDocStoreOffset(), si.docCount);
+                                               
+                                               // Verify two sources of "maxDoc" agree:
+                                               if (si.GetDocStoreOffset() == - 1 && fieldsReaderOrig.Size() != si.docCount)
+                                               {
+                                                       throw new CorruptIndexException("doc counts differ for segment " + segment + ": fieldsReader shows " + fieldsReaderOrig.Size() + " but segmentInfo shows " + si.docCount);
+                                               }
+                                               
+                                               if (fieldInfos.HasVectors())
+                                               {
+                                                       // open term vector files only as needed
+                                                       termVectorsReaderOrig = new TermVectorsReader(storeDir, storesSegment, fieldInfos, readBufferSize, si.GetDocStoreOffset(), si.docCount);
+                                               }
+                                       }
+                               }
+                       }
+
+            public FieldInfos fieldInfos_ForNUnit
+            {
+                get { return fieldInfos; }
+            }
+               }
+               
+               /// <summary> Thread-local FieldsReader holder: the initial value for each
+               /// thread is a clone of the original FieldsReader. </summary>
+               private class FieldsReaderLocal:CloseableThreadLocal
+               {
+                       public FieldsReaderLocal(SegmentReader enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(SegmentReader enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private SegmentReader enclosingInstance;
+                       public SegmentReader Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       public /*protected internal*/ override System.Object InitialValue()
+                       {
+                               return Enclosing_Instance.core.GetFieldsReaderOrig().Clone();
+                       }
+               }
+               
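+               /// <summary>A small synchronized reference counter used to share resources
+               /// (deleted docs, norms bytes, core readers) between cloned readers.</summary>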
+               public /*internal*/ class Ref
+               {
+                       private int refCount = 1;
+                       
+                       public override System.String ToString()
+                       {
+                               return "refcount: " + refCount;
+                       }
+                       
+                       public virtual int RefCount()
+                       {
+                               lock (this)
+                               {
+                                       return refCount;
+                               }
+                       }
+                       
+                       public virtual int IncRef()
+                       {
+                               lock (this)
+                               {
+                                       System.Diagnostics.Debug.Assert(refCount > 0);
+                                       refCount++;
+                                       return refCount;
+                               }
+                       }
+                       
+                       public virtual int DecRef()
+                       {
+                               lock (this)
+                               {
+                                       System.Diagnostics.Debug.Assert(refCount > 0);
+                                       refCount--;
+                                       return refCount;
+                               }
+                       }
+               }
+               
+               /// <summary> Byte[] referencing is used because a new norm object needs 
+               /// to be created for each clone, and the byte array is all 
+               /// that is needed for sharing between cloned readers.  The 
+               /// Norm's own reference counting governs sharing between readers, 
+               /// whereas the byte[] reference counting supports copy-on-write, 
+               /// which is independent of the reader references (i.e. IncRef, DecRef).
+               /// </summary>
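+               /// <remarks>For example, two cloned readers may share one byte[] through a
+               /// common bytesRef; the first reader that calls CopyOnWrite() makes itself a
+               /// private copy before changing a norm value.</remarks>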
+               
+               public /*internal*/ sealed class Norm : System.ICloneable
+               {
+                       private void  InitBlock(SegmentReader enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private SegmentReader enclosingInstance;
+                       public SegmentReader Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal /*private*/ int refCount = 1;
+                       
+                       // If this instance is a clone, the originalNorm
+                       // references the Norm that has a real open IndexInput:
+                       private Norm origNorm;
+                       
+                       private IndexInput in_Renamed;
+                       private long normSeek;
+                       
+                       // null until bytes is set
+                       private Ref bytesRef;
+                       internal /*private*/ byte[] bytes;
+                       internal /*private*/ bool dirty;
+                       internal /*private*/ int number;
+                       internal /*private*/ bool rollbackDirty;
+                       
+                       public Norm(SegmentReader enclosingInstance, IndexInput in_Renamed, int number, long normSeek)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.in_Renamed = in_Renamed;
+                               this.number = number;
+                               this.normSeek = normSeek;
+                       }
+                       
+                       public void  IncRef()
+                       {
+                               lock (this)
+                               {
+                                       System.Diagnostics.Debug.Assert(refCount > 0 &&(origNorm == null || origNorm.refCount > 0));
+                                       refCount++;
+                               }
+                       }
+                       
+                       private void  CloseInput()
+                       {
+                               if (in_Renamed != null)
+                               {
+                                       if (in_Renamed != Enclosing_Instance.singleNormStream)
+                                       {
+                                               // It's private to us -- just close it
+                                               in_Renamed.Close();
+                                       }
+                                       else
+                                       {
+                                               // We are sharing this with others -- decRef and
+                                               // maybe close the shared norm stream
+                                               if (Enclosing_Instance.singleNormRef.DecRef() == 0)
+                                               {
+                                                       Enclosing_Instance.singleNormStream.Close();
+                                                       Enclosing_Instance.singleNormStream = null;
+                                               }
+                                       }
+                                       
+                                       in_Renamed = null;
+                               }
+                       }
+                       
+                       public void  DecRef()
+                       {
+                               lock (this)
+                               {
+                                       System.Diagnostics.Debug.Assert(refCount > 0 &&(origNorm == null || origNorm.refCount > 0));
+                                       
+                                       if (--refCount == 0)
+                                       {
+                                               if (origNorm != null)
+                                               {
+                                                       origNorm.DecRef();
+                                                       origNorm = null;
+                                               }
+                                               else
+                                               {
+                                                       CloseInput();
+                                               }
+                                               
+                                               if (bytes != null)
+                                               {
+                                                       System.Diagnostics.Debug.Assert(bytesRef != null);
+                                                       bytesRef.DecRef();
+                                                       bytes = null;
+                                                       bytesRef = null;
+                                               }
+                                               else
+                                               {
+                                                       System.Diagnostics.Debug.Assert(bytesRef == null);
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       // Load bytes but do not cache them if they were not
+                       // already cached
+                       public void  Bytes(byte[] bytesOut, int offset, int len)
+                       {
+                               lock (this)
+                               {
+                                       System.Diagnostics.Debug.Assert(refCount > 0 &&(origNorm == null || origNorm.refCount > 0));
+                                       if (bytes != null)
+                                       {
+                                               // Already cached -- copy from cache:
+                                               System.Diagnostics.Debug.Assert(len <= Enclosing_Instance.MaxDoc());
+                                               Array.Copy(bytes, 0, bytesOut, offset, len);
+                                       }
+                                       else
+                                       {
+                                               // Not cached
+                                               if (origNorm != null)
+                                               {
+                                                       // Ask origNorm to load
+                                                       origNorm.Bytes(bytesOut, offset, len);
+                                               }
+                                               else
+                                               {
+                                                       // We are orig -- read ourselves from disk:
+                                                       lock (in_Renamed)
+                                                       {
+                                                               in_Renamed.Seek(normSeek);
+                                                               in_Renamed.ReadBytes(bytesOut, offset, len, false);
+                                                       }
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       // Load & cache full bytes array.  Returns bytes.
+                       public byte[] Bytes()
+                       {
+                               lock (this)
+                               {
+                                       System.Diagnostics.Debug.Assert(refCount > 0 &&(origNorm == null || origNorm.refCount > 0));
+                                       if (bytes == null)
+                                       {
+                                               // value not yet read
+                                               System.Diagnostics.Debug.Assert(bytesRef == null);
+                                               if (origNorm != null)
+                                               {
+                                                       // Ask origNorm to load so that for a series of
+                                                       // reopened readers we share a single read-only
+                                                       // byte[]
+                                                       bytes = origNorm.Bytes();
+                                                       bytesRef = origNorm.bytesRef;
+                                                       bytesRef.IncRef();
+                                                       
+                                                       // Once we've loaded the bytes we no longer need
+                                                       // origNorm:
+                                                       origNorm.DecRef();
+                                                       origNorm = null;
+                                               }
+                                               else
+                                               {
+                                                       // We are the origNorm, so load the bytes for real
+                                                       // ourself:
+                                                       int count = Enclosing_Instance.MaxDoc();
+                                                       bytes = new byte[count];
+                                                       
+                                                       // Since we are orig, in must not be null
+                                                       System.Diagnostics.Debug.Assert(in_Renamed != null);
+                                                       
+                                                       // Read from disk.
+                                                       lock (in_Renamed)
+                                                       {
+                                                               in_Renamed.Seek(normSeek);
+                                                               in_Renamed.ReadBytes(bytes, 0, count, false);
+                                                       }
+                                                       
+                                                       bytesRef = new Ref();
+                                                       CloseInput();
+                                               }
+                                       }
+                                       
+                                       return bytes;
+                               }
+                       }
+                       
+                       // Only for testing
+                       public /*internal*/ Ref BytesRef()
+                       {
+                               return bytesRef;
+                       }
+                       
+                       // Called if we intend to change a norm value.  We make a
+                       // private copy of bytes if it's shared with others:
+                       public byte[] CopyOnWrite()
+                       {
+                               lock (this)
+                               {
+                                       System.Diagnostics.Debug.Assert(refCount > 0 &&(origNorm == null || origNorm.refCount > 0));
+                                       Bytes();
+                                       System.Diagnostics.Debug.Assert(bytes != null);
+                                       System.Diagnostics.Debug.Assert(bytesRef != null);
+                                       if (bytesRef.RefCount() > 1)
+                                       {
+                                               // I cannot be the origNorm for another norm
+                                               // instance if I'm being changed.  Ie, only the
+                                               // "head Norm" can be changed:
+                                               System.Diagnostics.Debug.Assert(refCount == 1);
+                                               Ref oldRef = bytesRef;
+                                               bytes = Enclosing_Instance.CloneNormBytes(bytes);
+                                               bytesRef = new Ref();
+                                               oldRef.DecRef();
+                                       }
+                                       dirty = true;
+                                       return bytes;
+                               }
+                       }
+                       
+                       // Returns a copy of this Norm instance that shares
+                       // IndexInput & bytes with the original one
+                       public System.Object Clone()
+                       {
+                lock (this) //LUCENENET-375
+                {
+                    System.Diagnostics.Debug.Assert(refCount > 0 && (origNorm == null || origNorm.refCount > 0));
+
+                    Norm clone;
+                    try
+                    {
+                        clone = (Norm)base.MemberwiseClone();
+                    }
+                    catch (System.Exception cnse)
+                    {
+                        // Cannot happen: MemberwiseClone does not throw in .NET; this
+                        // mirrors the Java original's CloneNotSupportedException handling.
+                        throw new System.SystemException("unexpected CloneNotSupportedException", cnse);
+                    }
+                    clone.refCount = 1;
+
+                    if (bytes != null)
+                    {
+                        System.Diagnostics.Debug.Assert(bytesRef != null);
+                        System.Diagnostics.Debug.Assert(origNorm == null);
+
+                        // Clone holds a reference to my bytes:
+                        clone.bytesRef.IncRef();
+                    }
+                    else
+                    {
+                        System.Diagnostics.Debug.Assert(bytesRef == null);
+                        if (origNorm == null)
+                        {
+                            // I become the origNorm for the clone:
+                            clone.origNorm = this;
+                        }
+                        clone.origNorm.IncRef();
+                    }
+
+                    // Only the origNorm will actually readBytes from in:
+                    clone.in_Renamed = null;
+
+                    return clone;
+                }
+                       }
+                       
+                       // Flush all pending changes to the next generation
+                       // separate norms file.
+                       public void  ReWrite(SegmentInfo si)
+                       {
+                               System.Diagnostics.Debug.Assert(refCount > 0 && (origNorm == null || origNorm.refCount > 0), "refCount=" + refCount + " origNorm=" + origNorm);
+                               
+                               // NOTE: norms are re-written in regular directory, not cfs
+                               si.AdvanceNormGen(this.number);
+                               string normFileName = si.GetNormFileName(this.number);
+                IndexOutput @out = enclosingInstance.Directory().CreateOutput(normFileName);
+                bool success = false;
+                               try
+                               {
+                                       try {
+                        @out.WriteBytes(bytes, enclosingInstance.MaxDoc());
+                    } finally {
+                        @out.Close();
+                    }
+                    success = true;
+                               }
+                               finally
+                               {
+                    if (!success)
+                    {
+                        try
+                        {
+                            enclosingInstance.Directory().DeleteFile(normFileName);
+                        }
+                        catch (Exception)
+                        {
+                            // suppress this so we keep throwing the
+                            // original exception
+                        }
+                    }
+                               }
+                               this.dirty = false;
+                       }
+               }
+               
+               internal System.Collections.IDictionary norms = new System.Collections.Hashtable();
+               
+               /// <summary>The class which implements SegmentReader. </summary>
+               // @deprecated (LUCENE-1677)
+               private static System.Type IMPL;
+               
+               // @deprecated (LUCENE-1677)
+               private static System.Type READONLY_IMPL;
+               
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <deprecated>
+               /// </deprecated>
+        [Obsolete]
+               public static SegmentReader Get(SegmentInfo si)
+               {
+                       return Get(false, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
+               }
+               
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public static SegmentReader Get(bool readOnly, SegmentInfo si, int termInfosIndexDivisor)
+               {
+                       return Get(readOnly, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true, termInfosIndexDivisor);
+               }
+               
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <deprecated>
+               /// </deprecated>
+        [Obsolete]
+               internal static SegmentReader Get(SegmentInfo si, int readBufferSize, bool doOpenStores, int termInfosIndexDivisor)
+               {
+                       return Get(false, si.dir, si, readBufferSize, doOpenStores, termInfosIndexDivisor);
+               }
+               
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public static SegmentReader Get(bool readOnly, Directory dir, SegmentInfo si, int readBufferSize, bool doOpenStores, int termInfosIndexDivisor)
+               {
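+                       // The concrete SegmentReader type is pluggable through app
+                       // settings (see the static constructor), so instantiate it
+                       // reflectively rather than with 'new'.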
+                       SegmentReader instance;
+                       try
+                       {
+                               if (readOnly)
+                                       instance = (SegmentReader) System.Activator.CreateInstance(READONLY_IMPL);
+                               else
+                                       instance = (SegmentReader) System.Activator.CreateInstance(IMPL);
+                       }
+                       catch (System.Exception e)
+                       {
+                               throw new System.SystemException("cannot load SegmentReader class: " + e, e);
+                       }
+                       instance.readOnly = readOnly;
+                       instance.si = si;
+                       instance.readBufferSize = readBufferSize;
+                       
+                       bool success = false;
+                       
+                       try
+                       {
+                               instance.core = new CoreReaders(instance, dir, si, readBufferSize, termInfosIndexDivisor);
+                               if (doOpenStores)
+                               {
+                                       instance.core.OpenDocStores(si);
+                               }
+                               instance.LoadDeletedDocs();
+                               instance.OpenNorms(instance.core.cfsDir, readBufferSize);
+                               success = true;
+                       }
+                       finally
+                       {
+                               
+                               // With lock-less commits, it's entirely possible (and
+                               // fine) to hit a FileNotFound exception above.  In
+                               // this case, we want to explicitly close any subset
+                               // of things that were opened so that we don't have to
+                               // wait for a GC to do so.
+                               if (!success)
+                               {
+                                       instance.DoClose();
+                               }
+                       }
+                       return instance;
+               }
+               
+               internal virtual void  OpenDocStores()
+               {
+                       core.OpenDocStores(si);
+               }
+
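+        // Always returns true so it can be invoked inside Debug.Assert:
+        // the count checks below then run in debug builds only.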
+        private bool CheckDeletedCounts()
+        {
+            int recomputedCount = deletedDocs.GetRecomputedCount();
+
+            System.Diagnostics.Debug.Assert(deletedDocs.Count() == recomputedCount, "deleted count=" + deletedDocs.Count() + " vs recomputed count=" + recomputedCount);
+
+            System.Diagnostics.Debug.Assert(si.GetDelCount() == recomputedCount, "delete count mismatch: info=" + si.GetDelCount() + " vs BitVector=" + recomputedCount);
+
+            // Verify # deletes does not exceed maxDoc for this
+            // segment:
+            System.Diagnostics.Debug.Assert(si.GetDelCount() <= MaxDoc(), "delete count (" + recomputedCount + ") exceeds max doc (" + MaxDoc() + ") for segment " + si.name);
+
+            return true;
+        }
+               
+               private void  LoadDeletedDocs()
+               {
+                       // NOTE: the bitvector is stored using the regular directory, not cfs
+                       if (HasDeletions(si))
+                       {
+                               deletedDocs = new BitVector(Directory(), si.GetDelFileName());
+                               deletedDocsRef = new Ref();
+
+                System.Diagnostics.Debug.Assert(CheckDeletedCounts());
+                       }
+                       else 
+                               System.Diagnostics.Debug.Assert(si.GetDelCount() == 0);
+               }
+               
+               /// <summary> Clones the norm bytes.  May be overridden by subclasses.  New and experimental.</summary>
+               /// <param name="bytes">Byte array to clone
+               /// </param>
+               /// <returns> New byte array
+               /// </returns>
+               protected internal virtual byte[] CloneNormBytes(byte[] bytes)
+               {
+                       byte[] cloneBytes = new byte[bytes.Length];
+                       Array.Copy(bytes, 0, cloneBytes, 0, bytes.Length);
+                       return cloneBytes;
+               }
+               
+               /// <summary> Clones the deletedDocs BitVector.  May be overridden by subclasses. New and experimental.</summary>
+               /// <param name="bv">BitVector to clone
+               /// </param>
+               /// <returns> New BitVector
+               /// </returns>
+               protected internal virtual BitVector CloneDeletedDocs(BitVector bv)
+               {
+                       return (BitVector) bv.Clone();
+               }
+               
+               public override System.Object Clone()
+               {
+            lock (this)
+            {
+                try
+                {
+                    return Clone(readOnly); // Preserve current readOnly
+                }
+                catch (System.Exception ex)
+                {
+                    throw new System.SystemException(ex.Message, ex);
+                }
+            }
+               }
+               
+               public override IndexReader Clone(bool openReadOnly)
+               {
+                       lock (this)
+                       {
+                               return ReopenSegment(si, true, openReadOnly);
+                       }
+               }
+               
+               internal virtual SegmentReader ReopenSegment(SegmentInfo si, bool doClone, bool openReadOnly)
+               {
+                       lock (this)
+                       {
+                               bool deletionsUpToDate = (this.si.HasDeletions() == si.HasDeletions()) && (!si.HasDeletions() || this.si.GetDelFileName().Equals(si.GetDelFileName()));
+                               bool normsUpToDate = true;
+                               
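+                               // A field's norms are current only if its (possibly
+                               // generation-stamped) norm file name is unchanged.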
+                               bool[] fieldNormsChanged = new bool[core.fieldInfos.Size()];
+                               int fieldCount = core.fieldInfos.Size();
+                               for (int i = 0; i < fieldCount; i++)
+                               {
+                                       if (!this.si.GetNormFileName(i).Equals(si.GetNormFileName(i)))
+                                       {
+                                               normsUpToDate = false;
+                                               fieldNormsChanged[i] = true;
+                                       }
+                               }
+                               
+                               // if we're cloning we need to run through the reopenSegment logic
+                               // also if both old and new readers aren't readonly, we clone to avoid sharing modifications
+                               if (normsUpToDate && deletionsUpToDate && !doClone && openReadOnly && readOnly)
+                               {
+                                       return this;
+                               }
+                               
+                               // When cloning, the incoming SegmentInfos should not
+                               // have any changes in it:
+                               System.Diagnostics.Debug.Assert(!doClone || (normsUpToDate && deletionsUpToDate));
+                               
+                               // clone reader
+                               SegmentReader clone;
+                               try
+                               {
+                                       if (openReadOnly)
+                                               clone = (SegmentReader) System.Activator.CreateInstance(READONLY_IMPL);
+                                       else
+                                               clone = (SegmentReader) System.Activator.CreateInstance(IMPL);
+                               }
+                               catch (System.Exception e)
+                               {
+                                       throw new System.SystemException("cannot load SegmentReader class: " + e, e);
+                               }
+                               
+                               bool success = false;
+                               try
+                               {
+                                       core.IncRef();
+                                       clone.core = core;
+                                       clone.readOnly = openReadOnly;
+                                       clone.si = si;
+                                       clone.readBufferSize = readBufferSize;
+                                       
+                                       if (!openReadOnly && hasChanges)
+                                       {
+                                               // My pending changes transfer to the new reader
+                                               clone.pendingDeleteCount = pendingDeleteCount;
+                                               clone.deletedDocsDirty = deletedDocsDirty;
+                                               clone.normsDirty = normsDirty;
+                                               clone.hasChanges = hasChanges;
+                                               hasChanges = false;
+                                       }
+                                       
+                                       if (doClone)
+                                       {
+                                               if (deletedDocs != null)
+                                               {
+                                                       deletedDocsRef.IncRef();
+                                                       clone.deletedDocs = deletedDocs;
+                                                       clone.deletedDocsRef = deletedDocsRef;
+                                               }
+                                       }
+                                       else
+                                       {
+                                               if (!deletionsUpToDate)
+                                               {
+                                                       // load deleted docs
+                                                       System.Diagnostics.Debug.Assert(clone.deletedDocs == null);
+                                                       clone.LoadDeletedDocs();
+                                               }
+                                               else if (deletedDocs != null)
+                                               {
+                                                       deletedDocsRef.IncRef();
+                                                       clone.deletedDocs = deletedDocs;
+                                                       clone.deletedDocsRef = deletedDocsRef;
+                                               }
+                                       }
+                                       
+                                       clone.SetDisableFakeNorms(GetDisableFakeNorms());
+                                       clone.norms = new System.Collections.Hashtable();
+                                       
+                                       // Clone norms
+                                       for (int i = 0; i < fieldNormsChanged.Length; i++)
+                                       {
+                                               
+                                               // Clone unchanged norms to the cloned reader
+                                               if (doClone || !fieldNormsChanged[i])
+                                               {
+                                                       System.String curField = core.fieldInfos.FieldInfo(i).name;
+                                                       Norm norm = (Norm) this.norms[curField];
+                                                       if (norm != null)
+                                                               clone.norms[curField] = norm.Clone();
+                                               }
+                                       }
+                                       
+                                       // If we are not cloning, then this will open anew
+                                       // any norms that have changed:
+                                       clone.OpenNorms(si.GetUseCompoundFile() ? core.GetCFSReader() : Directory(), readBufferSize);
+                                       
+                                       success = true;
+                               }
+                               finally
+                               {
+                                       if (!success)
+                                       {
+                                               // An exception occurred during reopen; we have to decRef the norms
+                                               // we already incRef'ed and close singleNormStream and the FieldsReader
+                                               clone.DecRef();
+                                       }
+                               }
+                               
+                               return clone;
+                       }
+               }
+               
+               /// <deprecated>  
+               /// </deprecated>
+        [Obsolete]
+               protected internal override void  DoCommit()
+               {
+                       DoCommit(null);
+               }
+
+        protected internal override void DoCommit(System.Collections.Generic.IDictionary<string, string> commitUserData)
+        {
+            if (hasChanges)
+            {
+                StartCommit();
+                bool success = false;
+                try
+                {
+                    CommitChanges(commitUserData);
+                    success = true;
+                }
+                finally
+                {
+                    if (!success)
+                    {
+                        RollbackCommit();
+                    }
+                }
+            }
+        }
+
+        private void CommitChanges(System.Collections.Generic.IDictionary<string, string> commitUserData)
+        {
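+            // StartCommit has already snapshotted the rollback state; write any
+            // new deletions/norms generations and let DoCommit roll back on failure.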
+            if (deletedDocsDirty)
+            {               // re-write deleted
+                si.AdvanceDelGen();
+
+                // We can write directly to the actual name (vs to a
+                // .tmp & renaming it) because the file is not live
+                // until segments file is written:
+                string delFileName = si.GetDelFileName();
+                bool success = false;
+                try
+                {
+                    deletedDocs.Write(Directory(), delFileName);
+                    success = true;
+                }
+                finally
+                {
+                    if (!success)
+                    {
+                        try
+                        {
+                            Directory().DeleteFile(delFileName);
+                        }
+                        catch (Exception)
+                        {
+                            // suppress this so we keep throwing the
+                            // original exception
+                        }
+                    }
+                }
+
+                si.SetDelCount(si.GetDelCount() + pendingDeleteCount);
+                pendingDeleteCount = 0;
+                System.Diagnostics.Debug.Assert(deletedDocs.Count() == si.GetDelCount(), "delete count mismatch during commit: info=" + si.GetDelCount() + " vs BitVector=" + deletedDocs.Count());
+            }
+            else
+            {
+                System.Diagnostics.Debug.Assert(pendingDeleteCount == 0);
+            }
+
+            if (normsDirty)
+            {               // re-write norms
+                si.SetNumFields(core.fieldInfos.Size());
+                System.Collections.IEnumerator it = norms.Values.GetEnumerator();
+                while (it.MoveNext())
+                {
+                    Norm norm = (Norm)it.Current;
+                    if (norm.dirty)
+                    {
+                        norm.ReWrite(si);
+                    }
+                }
+            }
+
+            deletedDocsDirty = false;
+            normsDirty = false;
+            hasChanges = false;
+        }
+        
+               internal virtual FieldsReader GetFieldsReader()
+               {
+                       return (FieldsReader) fieldsReaderLocal.Get();
+               }
+               
+               protected internal override void  DoClose()
+               {
+                       termVectorsLocal.Close();
+                       fieldsReaderLocal.Close();
+                       
+                       if (deletedDocs != null)
+                       {
+                               deletedDocsRef.DecRef();
+                               // null so if an app hangs on to us we still free most ram
+                               deletedDocs = null;
+                       }
+                       
+                       System.Collections.IEnumerator it = norms.Values.GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               ((Norm) it.Current).DecRef();
+                       }
+                       if (core != null)
+                       {
+                               core.DecRef();
+                       }
+               }
+               
+               internal static bool HasDeletions(SegmentInfo si)
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       return si.HasDeletions();
+               }
+               
+               public override bool HasDeletions()
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       return deletedDocs != null;
+               }
+               
+               internal static bool UsesCompoundFile(SegmentInfo si)
+               {
+                       return si.GetUseCompoundFile();
+               }
+               
+               internal static bool HasSeparateNorms(SegmentInfo si)
+               {
+                       return si.HasSeparateNorms();
+               }
+               
+               protected internal override void  DoDelete(int docNum)
+               {
+                       if (deletedDocs == null)
+                       {
+                               deletedDocs = new BitVector(MaxDoc());
+                               deletedDocsRef = new Ref();
+                       }
+                       // there is more than 1 SegmentReader with a reference to this
+                       // deletedDocs BitVector so decRef the current deletedDocsRef,
+                       // clone the BitVector, create a new deletedDocsRef
+                       if (deletedDocsRef.RefCount() > 1)
+                       {
+                               Ref oldRef = deletedDocsRef;
+                               deletedDocs = CloneDeletedDocs(deletedDocs);
+                               deletedDocsRef = new Ref();
+                               oldRef.DecRef();
+                       }
+                       deletedDocsDirty = true;
+                       if (!deletedDocs.GetAndSet(docNum))
+                               pendingDeleteCount++;
+               }
+               
+               protected internal override void  DoUndeleteAll()
+               {
+                       deletedDocsDirty = false;
+                       if (deletedDocs != null)
+                       {
+                               System.Diagnostics.Debug.Assert(deletedDocsRef != null);
+                               deletedDocsRef.DecRef();
+                               deletedDocs = null;
+                               deletedDocsRef = null;
+                               pendingDeleteCount = 0;
+                               si.ClearDelGen();
+                               si.SetDelCount(0);
+                       }
+                       else
+                       {
+                               System.Diagnostics.Debug.Assert(deletedDocsRef == null);
+                               System.Diagnostics.Debug.Assert(pendingDeleteCount == 0);
+                       }
+               }
+               
+               internal virtual System.Collections.Generic.IList<string> Files()
+               {
+                       return si.Files();
+               }
+               
+               public override TermEnum Terms()
+               {
+                       EnsureOpen();
+                       return core.GetTermsReader().Terms();
+               }
+               
+               public override TermEnum Terms(Term t)
+               {
+                       EnsureOpen();
+                       return core.GetTermsReader().Terms(t);
+               }
+               
+               public /*internal*/ virtual FieldInfos FieldInfos()
+               {
+                       return core.fieldInfos;
+               }
+               
+               public override Document Document(int n, FieldSelector fieldSelector)
+               {
+                       EnsureOpen();
+                       return GetFieldsReader().Doc(n, fieldSelector);
+               }
+               
+               public override bool IsDeleted(int n)
+               {
+                       lock (this)
+                       {
+                               return (deletedDocs != null && deletedDocs.Get(n));
+                       }
+               }
+               
+               public override TermDocs TermDocs(Term term)
+               {
+                       if (term == null)
+                       {
+                               return new AllTermDocs(this);
+                       }
+                       else
+                       {
+                               return base.TermDocs(term);
+                       }
+               }
+               
+               public override TermDocs TermDocs()
+               {
+                       EnsureOpen();
+                       return new SegmentTermDocs(this);
+               }
+               
+               public override TermPositions TermPositions()
+               {
+                       EnsureOpen();
+                       return new SegmentTermPositions(this);
+               }
+               
+               public override int DocFreq(Term t)
+               {
+                       EnsureOpen();
+                       TermInfo ti = core.GetTermsReader().Get(t);
+                       if (ti != null)
+                               return ti.docFreq;
+                       else
+                               return 0;
+               }
+               
+               public override int NumDocs()
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       int n = MaxDoc();
+                       if (deletedDocs != null)
+                               n -= deletedDocs.Count();
+                       return n;
+               }
+               
+               public override int MaxDoc()
+               {
+                       // Don't call ensureOpen() here (it could affect performance)
+                       return si.docCount;
+               }
+               
+               /// <seealso cref="IndexReader.GetFieldNames(IndexReader.FieldOption)">
+               /// </seealso>
+        public override System.Collections.Generic.ICollection<string> GetFieldNames(IndexReader.FieldOption fieldOption)
+               {
+                       EnsureOpen();
+
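+            // The dictionary stands in for a set: its keys collect the distinct
+            // field names matching the requested option.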
+            System.Collections.Generic.IDictionary<string, string> fieldSet = new System.Collections.Generic.Dictionary<string, string>();
+                       for (int i = 0; i < core.fieldInfos.Size(); i++)
+                       {
+                               FieldInfo fi = core.fieldInfos.FieldInfo(i);
+                               if (fieldOption == IndexReader.FieldOption.ALL)
+                               {
+                                       fieldSet[fi.name] = fi.name;
+                               }
+                               else if (!fi.isIndexed && fieldOption == IndexReader.FieldOption.UNINDEXED)
+                               {
+                                       fieldSet[fi.name] = fi.name;
+                               }
+                               else if (fi.omitTermFreqAndPositions && fieldOption == IndexReader.FieldOption.OMIT_TERM_FREQ_AND_POSITIONS)
+                               {
+                                       fieldSet[fi.name] = fi.name;
+                               }
+                               else if (fi.storePayloads && fieldOption == IndexReader.FieldOption.STORES_PAYLOADS)
+                               {
+                                       fieldSet[fi.name] = fi.name;
+                               }
+                               else if (fi.isIndexed && fieldOption == IndexReader.FieldOption.INDEXED)
+                               {
+                                       fieldSet[fi.name] = fi.name;
+                               }
+                               else if (fi.isIndexed && fi.storeTermVector == false && fieldOption == IndexReader.FieldOption.INDEXED_NO_TERMVECTOR)
+                               {
+                                       fieldSet[fi.name] = fi.name;
+                               }
+                               else if (fi.storeTermVector == true && fi.storePositionWithTermVector == false && fi.storeOffsetWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR)
+                               {
+                                       fieldSet[fi.name] = fi.name;
+                               }
+                               else if (fi.isIndexed && fi.storeTermVector && fieldOption == IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR)
+                               {
+                                       fieldSet[fi.name] = fi.name;
+                               }
+                               else if (fi.storePositionWithTermVector && fi.storeOffsetWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_POSITION)
+                               {
+                                       fieldSet[fi.name] = fi.name;
+                               }
+                               else if (fi.storeOffsetWithTermVector && fi.storePositionWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET)
+                               {
+                                       fieldSet[fi.name] = fi.name;
+                               }
+                               else if ((fi.storeOffsetWithTermVector && fi.storePositionWithTermVector) && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET)
+                               {
+                                       fieldSet[fi.name] = fi.name;
+                               }
+                       }
+                       return fieldSet.Keys;
+               }
+               
+               
+               public override bool HasNorms(System.String field)
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               return norms.Contains(field);
+                       }
+               }
+               
+               internal static byte[] CreateFakeNorms(int size)
+               {
+                       byte[] ones = new byte[size];
+                       byte val = (byte) DefaultSimilarity.EncodeNorm(1.0f);
+            for (int i = 0; i < ones.Length; i++)
+            {
+                ones[i] = val;
+            }
+                       return ones;
+               }
+               
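+               // Lazily created array of maxDoc norms encoding 1.0f, handed out by
+               // FakeNorms() for fields that store no norms.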
+               private byte[] ones;
+               private byte[] FakeNorms()
+               {
+                       System.Diagnostics.Debug.Assert(!GetDisableFakeNorms());
+                       if (ones == null)
+                               ones = CreateFakeNorms(MaxDoc());
+                       return ones;
+               }
+               
+               // can return null if norms aren't stored
+               protected internal virtual byte[] GetNorms(System.String field)
+               {
+                       lock (this)
+                       {
+                               Norm norm = (Norm) norms[field];
+                               if (norm == null)
+                                       return null; // not indexed, or norms not stored
+                               return norm.Bytes();
+                       }
+               }
+               
+               // returns fake norms if norms aren't available
+               public override byte[] Norms(System.String field)
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               byte[] bytes = GetNorms(field);
+                               if (bytes == null && !GetDisableFakeNorms())
+                                       bytes = FakeNorms();
+                               return bytes;
+                       }
+               }
+               
+               protected internal override void  DoSetNorm(int doc, System.String field, byte value_Renamed)
+               {
+                       Norm norm = (Norm) norms[field];
+                       if (norm == null)
+                               return; // not an indexed field
+                       
+                       normsDirty = true;
+                       norm.CopyOnWrite()[doc] = value_Renamed; // set the value
+               }
+               
+               /// <summary>Read norms into a pre-allocated array. </summary>
+               public override void  Norms(System.String field, byte[] bytes, int offset)
+               {
+                       lock (this)
+                       {
+                               
+                               EnsureOpen();
+                               Norm norm = (Norm) norms[field];
+                               if (norm == null)
+                               {
+                    for (int i = offset; i < bytes.Length; i++)
+                    {
+                        bytes[i] = (byte) DefaultSimilarity.EncodeNorm(1.0f);
+                    }
+                                       return;
+                               }
+                               
+                               norm.Bytes(bytes, offset, MaxDoc());
+                       }
+               }
+               
+               
+               private void  OpenNorms(Directory cfsDir, int readBufferSize)
+               {
+                       long nextNormSeek = SegmentMerger.NORMS_HEADER.Length; //skip header (header unused for now)
+                       int maxDoc = MaxDoc();
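+                       // Each field's norms occupy exactly maxDoc bytes after the
+                       // header, in field order, so per-field offsets can be
+                       // accumulated as we walk the FieldInfos (separate norm files
+                       // still advance the shared-file offset, see below).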
+                       for (int i = 0; i < core.fieldInfos.Size(); i++)
+                       {
+                               FieldInfo fi = core.fieldInfos.FieldInfo(i);
+                               if (norms.Contains(fi.name))
+                               {
+                                       // in case this SegmentReader is being re-opened, we might be able to
+                                       // reuse some norm instances and skip loading them here
+                                       continue;
+                               }
+                               if (fi.isIndexed && !fi.omitNorms)
+                               {
+                                       Directory d = Directory();
+                                       System.String fileName = si.GetNormFileName(fi.number);
+                                       if (!si.HasSeparateNorms(fi.number))
+                                       {
+                                               d = cfsDir;
+                                       }
+                                       
+                                       // singleNormFile means multiple norms share this file
+                                       bool singleNormFile = fileName.EndsWith("." + IndexFileNames.NORMS_EXTENSION);
+                                       IndexInput normInput = null;
+                                       long normSeek;
+                                       
+                                       if (singleNormFile)
+                                       {
+                                               normSeek = nextNormSeek;
+                                               if (singleNormStream == null)
+                                               {
+                                                       singleNormStream = d.OpenInput(fileName, readBufferSize);
+                                                       singleNormRef = new Ref();
+                                               }
+                                               else
+                                               {
+                                                       singleNormRef.IncRef();
+                                               }
+                                               // All norms in the .nrm file can share a single IndexInput since
+                                               // they are only used in a synchronized context.
+                                               // If this were to change in the future, a clone could be done here.
+                                               normInput = singleNormStream;
+                                       }
+                                       else
+                                       {
+                                               normSeek = 0;
+                                               normInput = d.OpenInput(fileName);
+                                       }
+                                       
+                                       norms[fi.name] = new Norm(this, normInput, fi.number, normSeek);
+                                       nextNormSeek += maxDoc; // increment also if some norms are separate
+                               }
+                       }
+               }
+               
+               public /*internal*/ virtual bool TermsIndexLoaded()
+               {
+                       return core.TermsIndexIsLoaded();
+               }
+               
+               // NOTE: only called from IndexWriter when a near
+               // real-time reader is opened, or applyDeletes is run,
+               // sharing a segment that's still being merged.  This
+               // method is not thread safe, and relies on the
+               // synchronization in IndexWriter
+               internal virtual void  LoadTermsIndex(int termsIndexDivisor)
+               {
+                       core.LoadTermsIndex(si, termsIndexDivisor);
+               }
+               
+               // for testing only
+               public /*internal*/ virtual bool NormsClosed()
+               {
+                       if (singleNormStream != null)
+                       {
+                               return false;
+                       }
+                       System.Collections.IEnumerator it = norms.Values.GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               Norm norm = (Norm) it.Current;
+                               if (norm.refCount > 0)
+                               {
+                                       return false;
+                               }
+                       }
+                       return true;
+               }
+               
+               // for testing only
+               public /*internal*/ virtual bool NormsClosed(System.String field)
+               {
+                       Norm norm = (Norm) norms[field];
+                       return norm.refCount == 0;
+               }
+               
+               /// <summary> Create a clone from the initial TermVectorsReader and store it in the ThreadLocal.</summary>
+               /// <returns> TermVectorsReader
+               /// </returns>
+               internal virtual TermVectorsReader GetTermVectorsReader()
+               {
+                       TermVectorsReader tvReader = (TermVectorsReader) termVectorsLocal.Get();
+                       if (tvReader == null)
+                       {
+                               TermVectorsReader orig = core.GetTermVectorsReaderOrig();
+                               if (orig == null)
+                               {
+                                       return null;
+                               }
+                               else
+                               {
+                                       try
+                                       {
+                                               tvReader = (TermVectorsReader) orig.Clone();
+                                       }
+                                       catch (System.Exception)
+                                       {
+                                               return null;
+                                       }
+                               }
+                               termVectorsLocal.Set(tvReader);
+                       }
+                       return tvReader;
+               }
+               
+               internal virtual TermVectorsReader GetTermVectorsReaderOrig()
+               {
+                       return core.GetTermVectorsReaderOrig();
+               }
+               
+               /// <summary>Return a term frequency vector for the specified document and field. The
+               /// vector returned contains term numbers and frequencies for all terms in
+               /// the specified field of this document, if the field had storeTermVector
+               /// flag set.  If the flag was not set, the method returns null.
+               /// </summary>
+               /// <throws>  IOException </throws>
+               public override TermFreqVector GetTermFreqVector(int docNumber, System.String field)
+               {
+                       // Check if this field is invalid or has no stored term vector
+                       EnsureOpen();
+                       FieldInfo fi = core.fieldInfos.FieldInfo(field);
+                       if (fi == null || !fi.storeTermVector)
+                               return null;
+                       
+                       TermVectorsReader termVectorsReader = GetTermVectorsReader();
+                       if (termVectorsReader == null)
+                               return null;
+                       
+                       return termVectorsReader.Get(docNumber, field);
+               }
+               
+               
+               public override void  GetTermFreqVector(int docNumber, System.String field, TermVectorMapper mapper)
+               {
+                       EnsureOpen();
+                       FieldInfo fi = core.fieldInfos.FieldInfo(field);
+                       if (fi == null || !fi.storeTermVector)
+                               return;
+                       
+                       TermVectorsReader termVectorsReader = GetTermVectorsReader();
+                       if (termVectorsReader == null)
+                       {
+                               return;
+                       }
+                       
+                       
+                       termVectorsReader.Get(docNumber, field, mapper);
+               }
+               
+               
+               public override void  GetTermFreqVector(int docNumber, TermVectorMapper mapper)
+               {
+                       EnsureOpen();
+                       
+                       TermVectorsReader termVectorsReader = GetTermVectorsReader();
+                       if (termVectorsReader == null)
+                               return;
+                       
+                       termVectorsReader.Get(docNumber, mapper);
+               }
+               
+               /// <summary>Return an array of term frequency vectors for the specified document.
+               /// The array contains a vector for each vectorized field in the document.
+               /// Each vector contains term numbers and frequencies for all terms
+               /// in a given vectorized field.
+               /// If no such fields existed, the method returns null.
+               /// </summary>
+               /// <throws>  IOException </throws>
+               public override TermFreqVector[] GetTermFreqVectors(int docNumber)
+               {
+                       EnsureOpen();
+                       
+                       TermVectorsReader termVectorsReader = GetTermVectorsReader();
+                       if (termVectorsReader == null)
+                               return null;
+                       
+                       return termVectorsReader.Get(docNumber);
+               }
+               
+               /// <summary> Return the name of the segment this reader is reading.</summary>
+               public virtual System.String GetSegmentName()
+               {
+                       return core.segment;
+               }
+               
+               /// <summary> Return the SegmentInfo of the segment this reader is reading.</summary>
+               internal virtual SegmentInfo GetSegmentInfo()
+               {
+                       return si;
+               }
+               
+               internal virtual void  SetSegmentInfo(SegmentInfo info)
+               {
+                       si = info;
+               }
+               
+               internal virtual void  StartCommit()
+               {
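+            // Snapshot everything CommitChanges may mutate so RollbackCommit
+            // can restore it if the commit fails partway through.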
+            rollbackSegmentInfo = (SegmentInfo)si.Clone();
+                       rollbackHasChanges = hasChanges;
+                       rollbackDeletedDocsDirty = deletedDocsDirty;
+                       rollbackNormsDirty = normsDirty;
+                       rollbackPendingDeleteCount = pendingDeleteCount;
+                       System.Collections.IEnumerator it = norms.Values.GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               Norm norm = (Norm) it.Current;
+                               norm.rollbackDirty = norm.dirty;
+                       }
+               }
+               
+               internal virtual void  RollbackCommit()
+               {
+            si.Reset(rollbackSegmentInfo);
+                       hasChanges = rollbackHasChanges;
+                       deletedDocsDirty = rollbackDeletedDocsDirty;
+                       normsDirty = rollbackNormsDirty;
+                       pendingDeleteCount = rollbackPendingDeleteCount;
+                       System.Collections.IEnumerator it = norms.Values.GetEnumerator();
+                       while (it.MoveNext())
+                       {
+                               Norm norm = (Norm) it.Current;
+                               norm.dirty = norm.rollbackDirty;
+                       }
+               }
+               
+               /// <summary>Returns the directory this index resides in. </summary>
+               public override Directory Directory()
+               {
+                       // Don't ensureOpen here -- in certain cases, when a
+                       // cloned/reopened reader needs to commit, it may call
+                       // this method on the closed original reader
+                       return core.dir;
+               }
+               
+               // This is necessary so that cloned SegmentReaders (which
+               // share the underlying postings data) will map to the
+               // same entry in the FieldCache.  See LUCENE-1579.
+        [Obsolete("Mono.Lucene.Net-2.9.1. This method overrides obsolete member Mono.Lucene.Net.Index.IndexReader.GetFieldCacheKey()")]
+               public override System.Object GetFieldCacheKey()
+               {
+                       return core.freqStream;
+               }
+
+               public override object GetDeletesCacheKey() 
+        {
+            return deletedDocs;
+        }
+
+
+               public override long GetUniqueTermCount()
+               {
+                       return core.GetTermsReader().Size();
+               }
+               
+               /// <summary> Lots of tests did hacks like:<br/>
+               /// SegmentReader reader = (SegmentReader) IndexReader.Open(dir);<br/>
+               /// They broke. This method serves as a hack to keep those hacks working.
+               /// </summary>
+               public /*internal*/ static SegmentReader GetOnlySegmentReader(Directory dir)
+               {
+                       return GetOnlySegmentReader(IndexReader.Open(dir));
+               }
+               
+               public /*internal*/ static SegmentReader GetOnlySegmentReader(IndexReader reader)
+               {
+                       if (reader is SegmentReader)
+                               return (SegmentReader) reader;
+                       
+                       if (reader is DirectoryReader)
+                       {
+                               IndexReader[] subReaders = reader.GetSequentialSubReaders();
+                               if (subReaders.Length != 1)
+                               {
+                                       throw new System.ArgumentException(reader + " has " + subReaders.Length + " segments instead of exactly one");
+                               }
+                               
+                               return (SegmentReader) subReaders[0];
+                       }
+                       
+                       throw new System.ArgumentException(reader + " is not a SegmentReader or a single-segment DirectoryReader");
+               }
+               
+               public override int GetTermInfosIndexDivisor()
+               {
+                       return core.termsIndexDivisor;
+               }
+               static SegmentReader()
+               {
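+                       // Resolve IMPL / READONLY_IMPL from app settings so a custom
+                       // SegmentReader subclass can be substituted, falling back to
+                       // the built-in types when the setting is absent or unreadable.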
+                       {
+                               try
+                               {
+                                       System.String name = SupportClass.AppSettings.Get("Mono.Lucene.Net.SegmentReader.class", typeof(SegmentReader).FullName);
+                                       IMPL = System.Type.GetType(name);
+                               }
+                               catch (System.Security.SecurityException)
+                               {
+                                       try
+                                       {
+                                               IMPL = System.Type.GetType(typeof(SegmentReader).FullName);
+                                       }
+                                       catch (System.Exception e)
+                                       {
+                                               throw new System.SystemException("cannot load default SegmentReader class: " + e, e);
+                                       }
+                               }
+                               catch (System.Exception e)
+                               {
+                                       throw new System.SystemException("cannot load SegmentReader class: " + e, e);
+                               }
+                       }
+                       {
+                               try
+                               {
+                                       System.String name = SupportClass.AppSettings.Get("Mono.Lucene.Net.ReadOnlySegmentReader.class", typeof(ReadOnlySegmentReader).FullName);
+                                       READONLY_IMPL = System.Type.GetType(name);
+                               }
+                               catch (System.Security.SecurityException)
+                               {
+                                       try
+                                       {
+                                               READONLY_IMPL = System.Type.GetType(typeof(ReadOnlySegmentReader).FullName);
+                                       }
+                                       catch (System.Exception e)
+                                       {
+                                               throw new System.SystemException("cannot load default ReadOnlySegmentReader class: " + e, e);
+                                       }
+                               }
+                               catch (System.Exception e)
+                               {
+                                       throw new System.SystemException("cannot load ReadOnlySegmentReader class: " + e, e);
+                               }
+                       }
+               }
+
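+        // Test-only accessors: expose internal state to the NUnit test suite
+        // without widening the visibility of the fields themselves.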
+        public System.Collections.IDictionary norms_ForNUnit
+        {
+            get { return norms; }
+        }
+
+        public BitVector deletedDocs_ForNUnit
+        {
+            get { return deletedDocs; }
+        }
+
+        public CoreReaders core_ForNUnit
+        {
+            get { return core; }
+        }
+
+        public Ref deletedDocsRef_ForNUnit
+        {
+            get { return deletedDocsRef; }
+        }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentTermDocs.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentTermDocs.cs
new file mode 100644 (file)
index 0000000..dc5bfff
--- /dev/null
@@ -0,0 +1,270 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+using BitVector = Mono.Lucene.Net.Util.BitVector;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       public class SegmentTermDocs : TermDocs
+       {
+               protected internal SegmentReader parent;
+               protected internal IndexInput freqStream;
+               protected internal int count;
+               protected internal int df;
+               protected internal BitVector deletedDocs;
+               internal int doc = 0;
+               internal int freq;
+               
+               private int skipInterval;
+               private int maxSkipLevels;
+               private DefaultSkipListReader skipListReader;
+               
+               private long freqBasePointer;
+               private long proxBasePointer;
+               
+               private long skipPointer;
+               private bool haveSkipped;
+               
+               protected internal bool currentFieldStoresPayloads;
+               protected internal bool currentFieldOmitTermFreqAndPositions;
+               
+               public /*protected internal*/ SegmentTermDocs(SegmentReader parent)
+               {
+                       this.parent = parent;
+                       this.freqStream = (IndexInput) parent.core.freqStream.Clone();
+                       lock (parent)
+                       {
+                               this.deletedDocs = parent.deletedDocs;
+                       }
+                       this.skipInterval = parent.core.GetTermsReader().GetSkipInterval();
+                       this.maxSkipLevels = parent.core.GetTermsReader().GetMaxSkipLevels();
+               }
+               
+               public virtual void  Seek(Term term)
+               {
+                       TermInfo ti = parent.core.GetTermsReader().Get(term);
+                       Seek(ti, term);
+               }
+               
+               public virtual void  Seek(TermEnum termEnum)
+               {
+                       TermInfo ti;
+                       Term term;
+                       
+                       // use comparison of fieldinfos to verify that termEnum belongs to the same segment as this SegmentTermDocs
+                       if (termEnum is SegmentTermEnum && ((SegmentTermEnum) termEnum).fieldInfos == parent.core.fieldInfos)
+                       {
+                               // optimized case
+                               SegmentTermEnum segmentTermEnum = ((SegmentTermEnum) termEnum);
+                               term = segmentTermEnum.Term();
+                               ti = segmentTermEnum.TermInfo();
+                       }
+                       else
+                       {
+                               // punt case
+                               term = termEnum.Term();
+                               ti = parent.core.GetTermsReader().Get(term);
+                       }
+                       
+                       Seek(ti, term);
+               }
+               
+               internal virtual void  Seek(TermInfo ti, Term term)
+               {
+                       count = 0;
+                       FieldInfo fi = parent.core.fieldInfos.FieldInfo(term.field);
+                       currentFieldOmitTermFreqAndPositions = (fi != null) ? fi.omitTermFreqAndPositions : false;
+                       currentFieldStoresPayloads = (fi != null) ? fi.storePayloads : false;
+                       if (ti == null)
+                       {
+                               df = 0;
+                       }
+                       else
+                       {
+                               df = ti.docFreq;
+                               doc = 0;
+                               freqBasePointer = ti.freqPointer;
+                               proxBasePointer = ti.proxPointer;
+                               skipPointer = freqBasePointer + ti.skipOffset;
+                               freqStream.Seek(freqBasePointer);
+                               haveSkipped = false;
+                       }
+               }
+               
+               public virtual void  Close()
+               {
+                       freqStream.Close();
+                       if (skipListReader != null)
+                               skipListReader.Close();
+               }
+               
+               public int Doc()
+               {
+                       return doc;
+               }
+               public int Freq()
+               {
+                       return freq;
+               }
+               
+               protected internal virtual void  SkippingDoc()
+               {
+               }
+               
+               public virtual bool Next()
+               {
+                       while (true)
+                       {
+                               if (count == df)
+                                       return false;
+                               int docCode = freqStream.ReadVInt();
+                               
+                               if (currentFieldOmitTermFreqAndPositions)
+                               {
+                                       doc += docCode;
+                                       freq = 1;
+                               }
+                               else
+                               {
+                                       doc += SupportClass.Number.URShift(docCode, 1); // shift off low bit
+                                       if ((docCode & 1) != 0) // if low bit is set, freq is one
+                                               freq = 1;
+                                       else
+                                               freq = freqStream.ReadVInt(); // else read freq
+                               }
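+                               // e.g. a stored docCode of 7 decodes to delta = 7 >>> 1 = 3 with the
+                               // low bit set, so freq == 1; a docCode of 6 also gives delta = 3 but
+                               // the frequency is read as the following VInt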
+                               
+                               count++;
+                               
+                               if (deletedDocs == null || !deletedDocs.Get(doc))
+                                       break;
+                               SkippingDoc();
+                       }
+                       return true;
+               }
+               
+               /// <summary>Optimized implementation. </summary>
+               public virtual int Read(int[] docs, int[] freqs)
+               {
+                       int length = docs.Length;
+                       if (currentFieldOmitTermFreqAndPositions)
+                       {
+                               return ReadNoTf(docs, freqs, length);
+                       }
+                       else
+                       {
+                               int i = 0;
+                               while (i < length && count < df)
+                               {
+                                       // manually inlined call to next() for speed
+                                       int docCode = freqStream.ReadVInt();
+                                       doc += SupportClass.Number.URShift(docCode, 1); // shift off low bit
+                                       if ((docCode & 1) != 0) // if low bit is set, freq is one
+                                               freq = 1;
+                                       else
+                                               freq = freqStream.ReadVInt(); // else read freq
+                                       count++;
+                                       
+                                       if (deletedDocs == null || !deletedDocs.Get(doc))
+                                       {
+                                               docs[i] = doc;
+                                               freqs[i] = freq;
+                                               ++i;
+                                       }
+                               }
+                               return i;
+                       }
+               }
+               
+               private int ReadNoTf(int[] docs, int[] freqs, int length)
+               {
+                       int i = 0;
+                       while (i < length && count < df)
+                       {
+                               // manually inlined call to next() for speed
+                               doc += freqStream.ReadVInt();
+                               count++;
+                               
+                               if (deletedDocs == null || !deletedDocs.Get(doc))
+                               {
+                                       docs[i] = doc;
+                                       // Hardwire freq to 1 when term freqs were not
+                                       // stored in the index
+                                       freqs[i] = 1;
+                                       ++i;
+                               }
+                       }
+                       return i;
+               }
+               
+               
+               /// <summary>Overridden by SegmentTermPositions to skip in prox stream. </summary>
+               protected internal virtual void  SkipProx(long proxPointer, int payloadLength)
+               {
+               }
+               
+               /// <summary>Optimized implementation. </summary>
+               public virtual bool SkipTo(int target)
+               {
+                       if (df >= skipInterval)
+                       {
+                               // optimized case
+                               if (skipListReader == null)
+                                       skipListReader = new DefaultSkipListReader((IndexInput) freqStream.Clone(), maxSkipLevels, skipInterval); // lazily clone
+                               
+                               if (!haveSkipped)
+                               {
+                                       // lazily initialize skip stream
+                                       skipListReader.Init(skipPointer, freqBasePointer, proxBasePointer, df, currentFieldStoresPayloads);
+                                       haveSkipped = true;
+                               }
+                               
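+                               // the skip list may land on a later document; only reposition the
+                               // freq stream (and the prox stream, via SkipProx) if it actually
+                               // advanced past the current scan position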
+                               int newCount = skipListReader.SkipTo(target);
+                               if (newCount > count)
+                               {
+                                       freqStream.Seek(skipListReader.GetFreqPointer());
+                                       SkipProx(skipListReader.GetProxPointer(), skipListReader.GetPayloadLength());
+                                       
+                                       doc = skipListReader.GetDoc();
+                                       count = newCount;
+                               }
+                       }
+                       
+                       // done skipping, now just scan
+                       do 
+                       {
+                               if (!Next())
+                                       return false;
+                       }
+                       while (target > doc);
+                       return true;
+               }
+
+        public IndexInput freqStream_ForNUnit
+        {
+            get { return freqStream; }
+            set { freqStream = value; }
+        }
+    }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentTermEnum.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentTermEnum.cs
new file mode 100644 (file)
index 0000000..6a52fe5
--- /dev/null
@@ -0,0 +1,250 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       public sealed class SegmentTermEnum:TermEnum, System.ICloneable
+       {
+               private IndexInput input;
+               internal FieldInfos fieldInfos;
+               internal long size;
+               internal long position = - 1;
+               
+               private TermBuffer termBuffer = new TermBuffer();
+               private TermBuffer prevBuffer = new TermBuffer();
+               private TermBuffer scanBuffer = new TermBuffer(); // used for scanning
+               
+               private TermInfo termInfo = new TermInfo();
+               
+               private int format;
+               private bool isIndex = false;
+               internal long indexPointer = 0;
+               internal int indexInterval;
+               internal int skipInterval;
+               internal int maxSkipLevels;
+               private int formatM1SkipInterval;
+               
+               internal SegmentTermEnum(IndexInput i, FieldInfos fis, bool isi)
+               {
+                       input = i;
+                       fieldInfos = fis;
+                       isIndex = isi;
+                       maxSkipLevels = 1; // use single-level skip lists for formats > -3 
+                       
+                       int firstInt = input.ReadInt();
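+                       // format version numbers are written as negative ints; a non-negative
+                       // first int can therefore only come from the pre-versioned file format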
+                       if (firstInt >= 0)
+                       {
+                               // original-format file, without explicit format version number
+                               format = 0;
+                               size = firstInt;
+                               
+                               // back-compatible settings
+                               indexInterval = 128;
+                               skipInterval = System.Int32.MaxValue; // switch off skipTo optimization
+                       }
+                       else
+                       {
+                               // we have a format version number
+                               format = firstInt;
+                               
+                               // check that it is a format we can understand
+                               if (format < TermInfosWriter.FORMAT_CURRENT)
+                                       throw new CorruptIndexException("Unknown format version: " + format + ", expected " + TermInfosWriter.FORMAT_CURRENT + " or higher");
+                               
+                               size = input.ReadLong(); // read the size
+                               
+                               if (format == - 1)
+                               {
+                                       if (!isIndex)
+                                       {
+                                               indexInterval = input.ReadInt();
+                                               formatM1SkipInterval = input.ReadInt();
+                                       }
+                                       // switch off the skipTo optimization for file formats prior to 1.4rc2 in order to avoid
+                                       // a bug in the skipTo implementation of those versions
+                                       skipInterval = System.Int32.MaxValue;
+                               }
+                               else
+                               {
+                                       indexInterval = input.ReadInt();
+                                       skipInterval = input.ReadInt();
+                                       if (format <= TermInfosWriter.FORMAT)
+                                       {
+                                               // this new format introduces multi-level skipping
+                                               maxSkipLevels = input.ReadInt();
+                                       }
+                               }
+                               System.Diagnostics.Debug.Assert(indexInterval > 0, "indexInterval=" + indexInterval + "; must be > 0");
+                               System.Diagnostics.Debug.Assert(skipInterval > 0, "skipInterval=" + skipInterval + "; must be > 0");
+                       }
+                       if (format > TermInfosWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
+                       {
+                               termBuffer.SetPreUTF8Strings();
+                               scanBuffer.SetPreUTF8Strings();
+                               prevBuffer.SetPreUTF8Strings();
+                       }
+               }
+               
+               public System.Object Clone()
+               {
+                       SegmentTermEnum clone = null;
+                       try
+                       {
+                               clone = (SegmentTermEnum) base.MemberwiseClone();
+                       }
+                       catch (System.Exception)
+                       {
+                               // cannot happen: MemberwiseClone does not throw
+                       }
+                       
+                       clone.input = (IndexInput) input.Clone();
+                       clone.termInfo = new TermInfo(termInfo);
+                       
+                       clone.termBuffer = (TermBuffer) termBuffer.Clone();
+                       clone.prevBuffer = (TermBuffer) prevBuffer.Clone();
+                       clone.scanBuffer = new TermBuffer();
+                       
+                       return clone;
+               }
+               
+               internal void  Seek(long pointer, long p, Term t, TermInfo ti)
+               {
+                       input.Seek(pointer);
+                       position = p;
+                       termBuffer.Set(t);
+                       prevBuffer.Reset();
+                       termInfo.Set(ti);
+               }
+               
+               /// <summary>Increments the enumeration to the next element.  True if one exists.</summary>
+               public override bool Next()
+               {
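+                       // past the end of the enumeration: remember the last term in prevBuffer
+                       // and reset termBuffer so that Term() returns null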
+                       if (position++ >= size - 1)
+                       {
+                               prevBuffer.Set(termBuffer);
+                               termBuffer.Reset();
+                               return false;
+                       }
+                       
+                       prevBuffer.Set(termBuffer);
+                       termBuffer.Read(input, fieldInfos);
+                       
+                       termInfo.docFreq = input.ReadVInt(); // read doc freq
+                       termInfo.freqPointer += input.ReadVLong(); // read freq pointer
+                       termInfo.proxPointer += input.ReadVLong(); // read prox pointer
+                       
+                       if (format == - 1)
+                       {
+                               // just read skipOffset in order to increment the file pointer;
+                               // the value is never used since skipTo is switched off
+                               if (!isIndex)
+                               {
+                                       if (termInfo.docFreq > formatM1SkipInterval)
+                                       {
+                                               termInfo.skipOffset = input.ReadVInt();
+                                       }
+                               }
+                       }
+                       else
+                       {
+                               if (termInfo.docFreq >= skipInterval)
+                                       termInfo.skipOffset = input.ReadVInt();
+                       }
+                       
+                       if (isIndex)
+                               indexPointer += input.ReadVLong(); // read index pointer
+                       
+                       return true;
+               }
+               
+               /// <summary>Optimized scan, without allocating new terms. 
+               /// Returns the number of invocations of next(). 
+               /// </summary>
+               internal int ScanTo(Term term)
+               {
+                       scanBuffer.Set(term);
+                       int count = 0;
+                       while (scanBuffer.CompareTo(termBuffer) > 0 && Next())
+                       {
+                               count++;
+                       }
+                       return count;
+               }
+               
+               /// <summary>Returns the current Term in the enumeration.
+               /// Initially invalid, valid after next() called for the first time.
+               /// </summary>
+               public override Term Term()
+               {
+                       return termBuffer.ToTerm();
+               }
+               
+               /// <summary>Returns the previous Term enumerated. Initially null.</summary>
+               public /*internal*/ Term Prev()
+               {
+                       return prevBuffer.ToTerm();
+               }
+               
+               /// <summary>Returns the current TermInfo in the enumeration.
+               /// Initially invalid, valid after next() called for the first time.
+               /// </summary>
+               internal TermInfo TermInfo()
+               {
+                       return new TermInfo(termInfo);
+               }
+               
+               /// <summary>Sets the argument to the current TermInfo in the enumeration.
+               /// Initially invalid, valid after next() called for the first time.
+               /// </summary>
+               internal void  TermInfo(TermInfo ti)
+               {
+                       ti.Set(termInfo);
+               }
+               
+               /// <summary>Returns the docFreq from the current TermInfo in the enumeration.
+               /// Initially invalid, valid after next() called for the first time.
+               /// </summary>
+               public override int DocFreq()
+               {
+                       return termInfo.docFreq;
+               }
+               
+               /* Returns the freqPointer from the current TermInfo in the enumeration.
+               Initially invalid, valid after next() called for the first time.*/
+               internal long FreqPointer()
+               {
+                       return termInfo.freqPointer;
+               }
+               
+               /* Returns the proxPointer from the current TermInfo in the enumeration.
+               Initially invalid, valid after next() called for the first time.*/
+               internal long ProxPointer()
+               {
+                       return termInfo.proxPointer;
+               }
+               
+               /// <summary>Closes the enumeration to further activity, freeing resources. </summary>
+               public override void  Close()
+               {
+                       input.Close();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentTermPositionVector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentTermPositionVector.cs
new file mode 100644 (file)
index 0000000..865a720
--- /dev/null
@@ -0,0 +1,73 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       class SegmentTermPositionVector:SegmentTermVector, TermPositionVector
+       {
+               protected internal int[][] positions;
+               protected internal TermVectorOffsetInfo[][] offsets;
+               public static readonly int[] EMPTY_TERM_POS = new int[0];
+               
+               public SegmentTermPositionVector(System.String field, System.String[] terms, int[] termFreqs, int[][] positions, TermVectorOffsetInfo[][] offsets):base(field, terms, termFreqs)
+               {
+                       this.offsets = offsets;
+                       this.positions = positions;
+               }
+               
+               /// <summary> Returns an array of TermVectorOffsetInfo in which the term is found.
+               /// 
+               /// </summary>
+               /// <param name="index">The position in the array to get the offsets from
+               /// </param>
+               /// <returns> An array of TermVectorOffsetInfo objects, the shared empty array, or null when no offsets are stored
+               /// </returns>
+               /// <seealso cref="Mono.Lucene.Net.Analysis.Token">
+               /// </seealso>
+               public virtual TermVectorOffsetInfo[] GetOffsets(int index)
+               {
+                       TermVectorOffsetInfo[] result = TermVectorOffsetInfo.EMPTY_OFFSET_INFO;
+                       if (offsets == null)
+                               return null;
+                       if (index >= 0 && index < offsets.Length)
+                       {
+                               result = offsets[index];
+                       }
+                       return result;
+               }
+               
+               /// <summary> Returns an array of positions in which the term is found.
+               /// Terms are identified by the index at which their text appears in the
+               /// term String array obtained from the <code>indexOf</code> method.
+               /// </summary>
+               public virtual int[] GetTermPositions(int index)
+               {
+                       int[] result = EMPTY_TERM_POS;
+                       if (positions == null)
+                               return null;
+                       if (index >= 0 && index < positions.Length)
+                       {
+                               result = positions[index];
+                       }
+                       
+                       return result;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentTermPositions.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentTermPositions.cs
new file mode 100644 (file)
index 0000000..4a569d7
--- /dev/null
@@ -0,0 +1,227 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       public sealed class SegmentTermPositions:SegmentTermDocs, TermPositions
+       {
+               private IndexInput proxStream;
+               private int proxCount;
+               private int position;
+               
+               // the current payload length
+               private int payloadLength;
+               // indicates whether the payload of the current position has
+               // been read from the proxStream yet
+               private bool needToLoadPayload;
+               
+               // these variables are being used to remember information
+               // for a lazy skip
+               private long lazySkipPointer = - 1;
+               private int lazySkipProxCount = 0;
+               
+               internal SegmentTermPositions(SegmentReader p):base(p)
+               {
+                       this.proxStream = null; // the proxStream will be cloned lazily when nextPosition() is called for the first time
+               }
+               
+               internal override void  Seek(TermInfo ti, Term term)
+               {
+                       base.Seek(ti, term);
+                       if (ti != null)
+                               lazySkipPointer = ti.proxPointer;
+                       
+                       lazySkipProxCount = 0;
+                       proxCount = 0;
+                       payloadLength = 0;
+                       needToLoadPayload = false;
+               }
+               
+               public override void  Close()
+               {
+                       base.Close();
+                       if (proxStream != null)
+                               proxStream.Close();
+               }
+               
+               public int NextPosition()
+               {
+                       // This field does not store term freq, positions or payloads
+                       if (currentFieldOmitTermFreqAndPositions)
+                               return 0;
+                       // perform lazy skips if necessary
+                       LazySkip();
+                       proxCount--;
+                       return position += ReadDeltaPosition();
+               }
+               
+               private int ReadDeltaPosition()
+               {
+                       int delta = proxStream.ReadVInt();
+                       if (currentFieldStoresPayloads)
+                       {
+                               // if the current field stores payloads then
+                               // the position delta is shifted one bit to the left.
+                               // if the LSB is set, then we have to read the current
+                               // payload length
+                               if ((delta & 1) != 0)
+                               {
+                                       payloadLength = proxStream.ReadVInt();
+                               }
+                               delta = SupportClass.Number.URShift(delta, 1);
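+                               // e.g. a stored value of 5 has the LSB set: the payload length is
+                               // read first and the actual position delta is 5 >>> 1 == 2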
+                               needToLoadPayload = true;
+                       }
+                       return delta;
+               }
+               
+               protected internal override void  SkippingDoc()
+               {
+                       // we remember to skip a document lazily
+                       lazySkipProxCount += freq;
+               }
+               
+               public override bool Next()
+               {
+                       // we remember to skip the remaining positions of the current
+                       // document lazily
+                       lazySkipProxCount += proxCount;
+                       
+                       if (base.Next())
+                       {
+                               // run super
+                               proxCount = freq; // note frequency
+                               position = 0; // reset position
+                               return true;
+                       }
+                       return false;
+               }
+               
+               public override int Read(int[] docs, int[] freqs)
+               {
+                       throw new System.NotSupportedException("TermPositions does not support processing multiple documents in one call. Use TermDocs instead.");
+               }
+               
+               
+               /// <summary>Called by super.skipTo(). </summary>
+               protected internal override void  SkipProx(long proxPointer, int payloadLength)
+               {
+                       // we save the pointer, we might have to skip there lazily
+                       lazySkipPointer = proxPointer;
+                       lazySkipProxCount = 0;
+                       proxCount = 0;
+                       this.payloadLength = payloadLength;
+                       needToLoadPayload = false;
+               }
+               
+               private void  SkipPositions(int n)
+               {
+                       System.Diagnostics.Debug.Assert(!currentFieldOmitTermFreqAndPositions);
+                       for (int f = n; f > 0; f--)
+                       {
+                               // skip unread positions
+                               ReadDeltaPosition();
+                               SkipPayload();
+                       }
+               }
+               
+               private void  SkipPayload()
+               {
+                       if (needToLoadPayload && payloadLength > 0)
+                       {
+                               proxStream.Seek(proxStream.GetFilePointer() + payloadLength);
+                       }
+                       needToLoadPayload = false;
+               }
+               
+               // It is not always necessary to move the prox pointer
+               // to a new document after the freq pointer has been moved.
+               // Consider for example a phrase query with two terms:
+               // the freq pointer for term 1 has to move to document x
+               // to answer the question of whether the term occurs in that document.
+               // The positions only have to be read if term 2 also matches
+               // document x, to figure out whether term 1 and term 2 appear next
+               // to each other in document x and thus satisfy the query.
+               // So we move the prox pointer lazily to the document
+               // as soon as positions are requested.
+               private void  LazySkip()
+               {
+                       if (proxStream == null)
+                       {
+                               // clone lazily
+                               proxStream = (IndexInput) parent.core.proxStream.Clone();
+                       }
+                       
+                       // we might have to skip the current payload
+                       // if it was not read yet
+                       SkipPayload();
+                       
+                       if (lazySkipPointer != - 1)
+                       {
+                               proxStream.Seek(lazySkipPointer);
+                               lazySkipPointer = - 1;
+                       }
+                       
+                       if (lazySkipProxCount != 0)
+                       {
+                               SkipPositions(lazySkipProxCount);
+                               lazySkipProxCount = 0;
+                       }
+               }
+               
+               public int GetPayloadLength()
+               {
+                       return payloadLength;
+               }
+               
+               public byte[] GetPayload(byte[] data, int offset)
+               {
+                       if (!needToLoadPayload)
+                       {
+                               throw new System.IO.IOException("Either no payload exists at this term position or an attempt was made to load it more than once.");
+                       }
+                       
+                       // read payloads lazily
+                       byte[] retArray;
+                       int retOffset;
+                       if (data == null || data.Length - offset < payloadLength)
+                       {
+                               // the array is too small to store the payload data,
+                               // so we allocate a new one
+                               retArray = new byte[payloadLength];
+                               retOffset = 0;
+                       }
+                       else
+                       {
+                               retArray = data;
+                               retOffset = offset;
+                       }
+                       proxStream.ReadBytes(retArray, retOffset, payloadLength);
+                       needToLoadPayload = false;
+                       return retArray;
+               }
+               
+               public bool IsPayloadAvailable()
+               {
+                       return needToLoadPayload && payloadLength > 0;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentTermVector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentTermVector.cs
new file mode 100644 (file)
index 0000000..f31e75f
--- /dev/null
@@ -0,0 +1,103 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       
+       class SegmentTermVector : TermFreqVector
+       {
+               private System.String field;
+               private System.String[] terms;
+               private int[] termFreqs;
+               
+               internal SegmentTermVector(System.String field, System.String[] terms, int[] termFreqs)
+               {
+                       this.field = field;
+                       this.terms = terms;
+                       this.termFreqs = termFreqs;
+               }
+               
+               /// <summary> </summary>
+               /// <returns> The number of the field this vector is associated with
+               /// </returns>
+               public virtual System.String GetField()
+               {
+                       return field;
+               }
+               
+               public override System.String ToString()
+               {
+                       System.Text.StringBuilder sb = new System.Text.StringBuilder();
+                       sb.Append('{');
+                       sb.Append(field).Append(": ");
+                       if (terms != null)
+                       {
+                               for (int i = 0; i < terms.Length; i++)
+                               {
+                                       if (i > 0)
+                                               sb.Append(", ");
+                                       sb.Append(terms[i]).Append('/').Append(termFreqs[i]);
+                               }
+                       }
+                       sb.Append('}');
+                       
+                       return sb.ToString();
+               }
+               
+               public virtual int Size()
+               {
+                       return terms == null ? 0 : terms.Length;
+               }
+               
+               public virtual System.String[] GetTerms()
+               {
+                       return terms;
+               }
+               
+               public virtual int[] GetTermFrequencies()
+               {
+                       return termFreqs;
+               }
+               
+               public virtual int IndexOf(System.String termText)
+               {
+                       if (terms == null)
+                               return -1;
+                       int res = System.Array.BinarySearch(terms, termText, System.StringComparer.Ordinal);
+                       return res >= 0 ? res : -1;
+               }
+               
+               public virtual int[] IndexesOf(System.String[] termNumbers, int start, int len)
+               {
+                       // TODO: there must be a more efficient way of doing this.
+                       //       At least, we could advance the lower bound of the terms array
+                       //       as we find valid indexes. Also, it might be possible to leverage
+                       //       this even more by starting in the middle of the termNumbers array
+                       //       and thus dividing the terms array maybe in half with each found index.
+                       int[] res = new int[len];
+                       
+                       for (int i = 0; i < len; i++)
+                       {
+                               res[i] = IndexOf(termNumbers[start + i]);
+                       }
+                       return res;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentWriteState.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SegmentWriteState.cs
new file mode 100644 (file)
index 0000000..89d97d5
--- /dev/null
@@ -0,0 +1,53 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Directory = Mono.Lucene.Net.Store.Directory;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       class SegmentWriteState
+       {
+               internal DocumentsWriter docWriter;
+               internal Directory directory;
+               internal System.String segmentName;
+               internal System.String docStoreSegmentName;
+               internal int numDocs;
+               internal int termIndexInterval;
+               internal int numDocsInStore;
+               internal System.Collections.Hashtable flushedFiles;
+               
+               public SegmentWriteState(DocumentsWriter docWriter, Directory directory, System.String segmentName, System.String docStoreSegmentName, int numDocs, int numDocsInStore, int termIndexInterval)
+               {
+                       this.docWriter = docWriter;
+                       this.directory = directory;
+                       this.segmentName = segmentName;
+                       this.docStoreSegmentName = docStoreSegmentName;
+                       this.numDocs = numDocs;
+                       this.numDocsInStore = numDocsInStore;
+                       this.termIndexInterval = termIndexInterval;
+            flushedFiles = new System.Collections.Hashtable();
+               }
+               
+               public virtual System.String SegmentFileName(System.String ext)
+               {
+                       return segmentName + "." + ext;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SerialMergeScheduler.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SerialMergeScheduler.cs
new file mode 100644 (file)
index 0000000..1038ca6
--- /dev/null
@@ -0,0 +1,52 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>A {@link MergeScheduler} that simply does each merge
+       /// sequentially, using the current thread. 
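+       /// A writer would typically install it via something like
+       /// <code>writer.SetMergeScheduler(new SerialMergeScheduler())</code>.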
+       /// </summary>
+       public class SerialMergeScheduler:MergeScheduler
+       {
+               
+               /// <summary>Just do the merges in sequence. We do this
+               /// "synchronized" so that even if the application is using
+               /// multiple threads, only one merge may run at a time. 
+               /// </summary>
+               public override void  Merge(IndexWriter writer)
+               {
+                       lock (this)
+                       {
+                               
+                               while (true)
+                               {
+                                       MergePolicy.OneMerge merge = writer.GetNextMerge();
+                                       if (merge == null)
+                                               break;
+                                       writer.Merge(merge);
+                               }
+                       }
+               }
+               
+               public override void  Close()
+               {
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SnapshotDeletionPolicy.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SnapshotDeletionPolicy.cs
new file mode 100644 (file)
index 0000000..2cb56e8
--- /dev/null
@@ -0,0 +1,194 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Directory = Mono.Lucene.Net.Store.Directory;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>A {@link IndexDeletionPolicy} that wraps around any other
+       /// {@link IndexDeletionPolicy} and adds the ability to hold and
+       /// later release a single "snapshot" of an index.  While
+       /// the snapshot is held, the {@link IndexWriter} will not
+       /// remove any files associated with it even if the index is
+       /// otherwise being actively, arbitrarily changed.  Because
+       /// we wrap another arbitrary {@link IndexDeletionPolicy}, this
+       /// gives you the freedom to continue using whatever {@link
+       /// IndexDeletionPolicy} you would normally want to use with your
+       /// index.  Note that you can re-use a single instance of
+       /// SnapshotDeletionPolicy across multiple writers as long
+       /// as they are against the same index Directory.  Any
+       /// snapshot held when a writer is closed will "survive"
+       /// when the next writer is opened.
+       /// 
+       /// <p/><b>WARNING</b>: This API is new and experimental and
+       /// may change suddenly.<p/>
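+       /// 
+       /// <p/>A rough backup sketch (dir and analyzer stand in for the
+       /// application's own directory and analyzer):
+       /// <code>
+       /// SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
+       /// IndexWriter writer = new IndexWriter(dir, analyzer, dp, IndexWriter.MaxFieldLength.UNLIMITED);
+       /// try
+       /// {
+       ///     IndexCommitPoint cp = dp.Snapshot(); // files of this commit are now protected
+       ///     // ... copy the files named by cp.GetFileNames() somewhere safe ...
+       /// }
+       /// finally
+       /// {
+       ///     dp.Release(); // allow the wrapped policy to prune them again
+       /// }
+       /// </code>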
+       /// </summary>
+       
+       public class SnapshotDeletionPolicy : IndexDeletionPolicy
+       {
+               
+               private IndexCommit lastCommit;
+               private IndexDeletionPolicy primary;
+               private System.String snapshot;
+               
+               public SnapshotDeletionPolicy(IndexDeletionPolicy primary)
+               {
+                       this.primary = primary;
+               }
+               
+               public virtual void  OnInit(System.Collections.IList commits)
+               {
+                       lock (this)
+                       {
+                               primary.OnInit(WrapCommits(commits));
+                               lastCommit = (IndexCommit) commits[commits.Count - 1];
+                       }
+               }
+               
+               public virtual void  OnCommit(System.Collections.IList commits)
+               {
+                       lock (this)
+                       {
+                               primary.OnCommit(WrapCommits(commits));
+                               lastCommit = (IndexCommit) commits[commits.Count - 1];
+                       }
+               }
+               
+               /// <summary>Take a snapshot of the most recent commit to the
+               /// index.  You must call release() to free this snapshot.
+               /// Note that while the snapshot is held, the files it
+               /// references will not be deleted, which will consume
+               /// additional disk space in your index. If you take a
+               /// snapshot at a particularly bad time (say just before
+               /// you call optimize()) then in the worst case this could
+               /// consume an extra 1X of your total index size, until
+               /// you release the snapshot. 
+               /// </summary>
+               // TODO 3.0: change this to return IndexCommit instead
+               public virtual IndexCommitPoint Snapshot()
+               {
+                       lock (this)
+                       {
+                               if (lastCommit == null)
+                               {
+                                       throw new System.SystemException("no index commits to snapshot!");
+                               }
+                               if (snapshot == null)
+                                       snapshot = lastCommit.GetSegmentsFileName();
+                               else
+                                       throw new System.SystemException("snapshot is already set; please call release() first");
+                               return lastCommit;
+                       }
+               }
+               
+               /// <summary>Release the currently held snapshot. </summary>
+               public virtual void  Release()
+               {
+                       lock (this)
+                       {
+                               if (snapshot != null)
+                                       snapshot = null;
+                               else
+                                       throw new System.SystemException("snapshot was not set; please call snapshot() first");
+                       }
+               }
+               
+               private class MyCommitPoint:IndexCommit
+               {
+                       private void  InitBlock(SnapshotDeletionPolicy enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private SnapshotDeletionPolicy enclosingInstance;
+                       public SnapshotDeletionPolicy Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal IndexCommit cp;
+                       internal MyCommitPoint(SnapshotDeletionPolicy enclosingInstance, IndexCommit cp)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.cp = cp;
+                       }
+
+            public override string ToString()
+            {
+                return "SnapshotDeletionPolicy.SnapshotCommitPoint(" + cp + ")";
+            }
+
+                       public override System.String GetSegmentsFileName()
+                       {
+                               return cp.GetSegmentsFileName();
+                       }
+            public override System.Collections.Generic.ICollection<string> GetFileNames()
+                       {
+                               return cp.GetFileNames();
+                       }
+                       public override Directory GetDirectory()
+                       {
+                               return cp.GetDirectory();
+                       }
+                       public override void  Delete()
+                       {
+                               lock (Enclosing_Instance)
+                               {
+                                       // Suppress the delete request if this commit point is
+                                       // our current snapshot.
+                                       if (Enclosing_Instance.snapshot == null || !Enclosing_Instance.snapshot.Equals(GetSegmentsFileName()))
+                                               cp.Delete();
+                               }
+                       }
+                       public override bool IsDeleted()
+                       {
+                               return cp.IsDeleted();
+                       }
+                       public override long GetVersion()
+                       {
+                               return cp.GetVersion();
+                       }
+                       public override long GetGeneration()
+                       {
+                               return cp.GetGeneration();
+                       }
+            public override System.Collections.Generic.IDictionary<string, string> GetUserData()
+                       {
+                               return cp.GetUserData();
+                       }
+
+            public override bool IsOptimized()
+            {
+                return cp.IsOptimized();
+            }
+               }
+               
+               private System.Collections.IList WrapCommits(System.Collections.IList commits)
+               {
+                       int count = commits.Count;
+                       System.Collections.IList myCommits = new System.Collections.ArrayList(count);
+                       for (int i = 0; i < count; i++)
+                               myCommits.Add(new MyCommitPoint(this, (IndexCommit) commits[i]));
+                       return myCommits;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SortedTermVectorMapper.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/SortedTermVectorMapper.cs
new file mode 100644 (file)
index 0000000..f5e9525
--- /dev/null
@@ -0,0 +1,132 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> Store a sorted collection of {@link Mono.Lucene.Net.Index.TermVectorEntry}s.  Collects all term information
+       /// into a single SortedSet.
+       /// <br/>
+       /// NOTE: This Mapper ignores all Field information for the Document.  This means that if you are using offset/positions you will not
+       /// know what Fields they correlate with.
+       /// <br/>
+       /// This class is not thread-safe.
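+       /// <br/>
+       /// A rough usage sketch (assuming a frequency-sorting comparer such as
+       /// TermVectorEntryFreqSortedComparator is available):
+       /// <code>
+       /// TermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
+       /// reader.GetTermFreqVector(docNumber, mapper);
+       /// // mapper.GetTermVectorEntrySet() now holds one entry per term, across all fields
+       /// </code>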
+       /// </summary>
+       public class SortedTermVectorMapper:TermVectorMapper
+       {
+
+
+        private System.Collections.Generic.SortedDictionary<System.Object, System.Object> currentSet;
+               private System.Collections.IDictionary termToTVE = new System.Collections.Hashtable();
+               private bool storeOffsets;
+               private bool storePositions;
+               /// <summary> Stand-in name for the field in {@link TermVectorEntry}.</summary>
+               public const System.String ALL = "_ALL_";
+               
+               /// <summary> </summary>
+               /// <param name="comparator">A Comparator for sorting {@link TermVectorEntry}s
+               /// </param>
+               public SortedTermVectorMapper(System.Collections.Generic.IComparer<System.Object> comparator):this(false, false, comparator)
+               {
+               }
+               
+               
+               public SortedTermVectorMapper(bool ignoringPositions, bool ignoringOffsets, System.Collections.Generic.IComparer<System.Object> comparator):base(ignoringPositions, ignoringOffsets)
+               {
+            currentSet = new System.Collections.Generic.SortedDictionary<System.Object, System.Object>(comparator);
+               }
+               
+               /// <summary> </summary>
+               /// <param name="term">The term to map
+               /// </param>
+               /// <param name="frequency">The frequency of the term
+               /// </param>
+               /// <param name="offsets">Offset information, may be null
+               /// </param>
+               /// <param name="positions">Position information, may be null
+               /// </param>
+               //We need to combine any previous mentions of the term
+               public override void  Map(System.String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions)
+               {
+                       TermVectorEntry entry = (TermVectorEntry) termToTVE[term];
+                       if (entry == null)
+                       {
+                               entry = new TermVectorEntry(ALL, term, frequency, storeOffsets ? offsets : null, storePositions ? positions : null);
+                               termToTVE[term] = entry;
+                               currentSet.Add(entry, entry);
+                       }
+                       else
+                       {
+                               entry.SetFrequency(entry.GetFrequency() + frequency);
+                               if (storeOffsets)
+                               {
+                                       TermVectorOffsetInfo[] existingOffsets = entry.GetOffsets();
+                                       //A few diff. cases here:  offsets is null, existing offsets is null, both are null, same for positions
+                                       if (existingOffsets != null && offsets != null && offsets.Length > 0)
+                                       {
+                                               //copy over the existing offsets
+                                               TermVectorOffsetInfo[] newOffsets = new TermVectorOffsetInfo[existingOffsets.Length + offsets.Length];
+                                               Array.Copy(existingOffsets, 0, newOffsets, 0, existingOffsets.Length);
+                                               Array.Copy(offsets, 0, newOffsets, existingOffsets.Length, offsets.Length);
+                                               entry.SetOffsets(newOffsets);
+                                       }
+                                       else if (existingOffsets == null && offsets != null && offsets.Length > 0)
+                                       {
+                                               entry.SetOffsets(offsets);
+                                       }
+                                       //else leave it alone
+                               }
+                               if (storePositions)
+                               {
+                                       int[] existingPositions = entry.GetPositions();
+                                       if (existingPositions != null && positions != null && positions.Length > 0)
+                                       {
+                                               int[] newPositions = new int[existingPositions.Length + positions.Length];
+                                               Array.Copy(existingPositions, 0, newPositions, 0, existingPositions.Length);
+                                               Array.Copy(positions, 0, newPositions, existingPositions.Length, positions.Length);
+                                               entry.SetPositions(newPositions);
+                                       }
+                                       else if (existingPositions == null && positions != null && positions.Length > 0)
+                                       {
+                                               entry.SetPositions(positions);
+                                       }
+                               }
+                       }
+               }
+               
+               public override void  SetExpectations(System.String field, int numTerms, bool storeOffsets, bool storePositions)
+               {
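+			// field and numTerms are intentionally ignored: this mapper aggregates
+			// terms across all fields under the ALL pseudo-field.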
+                       this.storeOffsets = storeOffsets;
+                       this.storePositions = storePositions;
+               }
+               
+		/// <summary> The term vector entry set: a sorted collection of {@link TermVectorEntry} objects,
+		/// exposed in this port as a SortedDictionary keyed and valued by the entry.  Sort order is
+		/// determined by the comparator passed into the constructor.
+		/// <br/>
+		/// This set will be empty until after the mapping process has taken place.
+		/// </summary>
+		/// <returns> The sorted collection of {@link TermVectorEntry}.
+		/// </returns>
+		public virtual System.Collections.Generic.SortedDictionary<System.Object, System.Object> GetTermVectorEntrySet()
+               {
+                       return currentSet;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/StaleReaderException.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/StaleReaderException.cs
new file mode 100644 (file)
index 0000000..0b127dd
--- /dev/null
@@ -0,0 +1,39 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> This exception is thrown when an {@link IndexReader}
+       /// tries to make changes to the index (via {@link
+       /// IndexReader#deleteDocument}, {@link
+       /// IndexReader#undeleteAll} or {@link IndexReader#setNorm})
+       /// but changes have already been committed to the index
+       /// since this reader was instantiated.  When this happens
+       /// you must open a new reader on the current index to make
+       /// the changes.
+       /// </summary>
+       [Serializable]
+       public class StaleReaderException:System.IO.IOException
+       {
+               public StaleReaderException(System.String message):base(message)
+               {
+               }
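+		
+		// A minimal recovery sketch (hedged): "reader", "directory" and "docId" are
+		// hypothetical, and IndexReader.Open/DeleteDocument are assumed to exist in
+		// this port as in upstream Lucene.
+		//
+		//   try { reader.DeleteDocument(docId); }
+		//   catch (StaleReaderException) {
+		//       reader.Close();
+		//       reader = IndexReader.Open(directory); // reopen on the current index
+		//       reader.DeleteDocument(docId);
+		//   }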
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/StoredFieldsWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/StoredFieldsWriter.cs
new file mode 100644 (file)
index 0000000..7b741cd
--- /dev/null
@@ -0,0 +1,266 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using RAMOutputStream = Mono.Lucene.Net.Store.RAMOutputStream;
+using ArrayUtil = Mono.Lucene.Net.Util.ArrayUtil;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+	/// <summary>Consumes documents' stored fields and writes them to the fields files (.fdt/.fdx). </summary>
+       sealed class StoredFieldsWriter
+       {
+               private void  InitBlock()
+               {
+                       docFreeList = new PerDoc[1];
+               }
+               
+               internal FieldsWriter fieldsWriter;
+               internal DocumentsWriter docWriter;
+               internal FieldInfos fieldInfos;
+               internal int lastDocID;
+               
+               internal PerDoc[] docFreeList;
+               internal int freeCount;
+               
+               public StoredFieldsWriter(DocumentsWriter docWriter, FieldInfos fieldInfos)
+               {
+                       InitBlock();
+                       this.docWriter = docWriter;
+                       this.fieldInfos = fieldInfos;
+               }
+               
+               public StoredFieldsWriterPerThread AddThread(DocumentsWriter.DocState docState)
+               {
+                       return new StoredFieldsWriterPerThread(docState, this);
+               }
+               
+               public void  Flush(SegmentWriteState state)
+               {
+                       lock (this)
+                       {
+                               
+                               if (state.numDocsInStore > 0)
+                               {
+                                       // It's possible that all documents seen in this segment
+                                       // hit non-aborting exceptions, in which case we will
+                                       // not have yet init'd the FieldsWriter:
+                                       InitFieldsWriter();
+                                       
+                                       // Fill fdx file to include any final docs that we
+                                       // skipped because they hit non-aborting exceptions
+                                       Fill(state.numDocsInStore - docWriter.GetDocStoreOffset());
+                               }
+                               
+                               if (fieldsWriter != null)
+                                       fieldsWriter.Flush();
+                       }
+               }
+               
+               private void  InitFieldsWriter()
+               {
+                       if (fieldsWriter == null)
+                       {
+                               System.String docStoreSegment = docWriter.GetDocStoreSegment();
+                               if (docStoreSegment != null)
+                               {
+                                       fieldsWriter = new FieldsWriter(docWriter.directory, docStoreSegment, fieldInfos);
+                                       docWriter.AddOpenFile(docStoreSegment + "." + IndexFileNames.FIELDS_EXTENSION);
+                                       docWriter.AddOpenFile(docStoreSegment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION);
+                                       lastDocID = 0;
+                               }
+                       }
+               }
+               
+               public void  CloseDocStore(SegmentWriteState state)
+               {
+                       lock (this)
+                       {
+                               int inc = state.numDocsInStore - lastDocID;
+                               if (inc > 0)
+                               {
+                                       InitFieldsWriter();
+                                       Fill(state.numDocsInStore - docWriter.GetDocStoreOffset());
+                               }
+                               
+                               if (fieldsWriter != null)
+                               {
+                                       fieldsWriter.Close();
+                                       fieldsWriter = null;
+                                       lastDocID = 0;
+                                       System.Diagnostics.Debug.Assert(state.docStoreSegmentName != null);
+					SupportClass.CollectionsHelper.AddIfNotContains(state.flushedFiles, state.docStoreSegmentName + "." + IndexFileNames.FIELDS_EXTENSION);
+					SupportClass.CollectionsHelper.AddIfNotContains(state.flushedFiles, state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION);
+                                       
+                                       state.docWriter.RemoveOpenFile(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_EXTENSION);
+                                       state.docWriter.RemoveOpenFile(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION);
+                                       
+                                       System.String fileName = state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION;
+                                       
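+					// Sanity check the stored fields index (.fdx): the file should be a
+					// 4-byte header followed by one 8-byte pointer per stored document.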
+                                       if (4 + ((long) state.numDocsInStore) * 8 != state.directory.FileLength(fileName))
+                                               throw new System.SystemException("after flush: fdx size mismatch: " + state.numDocsInStore + " docs vs " + state.directory.FileLength(fileName) + " length in bytes of " + fileName + " file exists?=" + state.directory.FileExists(fileName));
+                               }
+                       }
+               }
+               
+               internal int allocCount;
+               
+               internal PerDoc GetPerDoc()
+               {
+                       lock (this)
+                       {
+                               if (freeCount == 0)
+                               {
+                                       allocCount++;
+                                       if (allocCount > docFreeList.Length)
+                                       {
+                                               // Grow our free list up front to make sure we have
+                                               // enough space to recycle all outstanding PerDoc
+                                               // instances
+                                               System.Diagnostics.Debug.Assert(allocCount == 1 + docFreeList.Length);
+                                               docFreeList = new PerDoc[ArrayUtil.GetNextSize(allocCount)];
+                                       }
+                                       return new PerDoc(this);
+                               }
+                               else
+                                       return docFreeList[--freeCount];
+                       }
+               }
+               
+               internal void  Abort()
+               {
+                       lock (this)
+                       {
+                               if (fieldsWriter != null)
+                               {
+                                       try
+                                       {
+                                               fieldsWriter.Close();
+                                       }
+					catch (System.Exception)
+					{
+						// Ignore exceptions on close; we are aborting anyway.
+					}
+                                       fieldsWriter = null;
+                                       lastDocID = 0;
+                               }
+                       }
+               }
+               
+               /// <summary>Fills in any hole in the docIDs </summary>
+               internal void  Fill(int docID)
+               {
+                       int docStoreOffset = docWriter.GetDocStoreOffset();
+                       
+                       // We must "catch up" for all docs before us
+                       // that had no stored fields:
+                       int end = docID + docStoreOffset;
+                       while (lastDocID < end)
+                       {
+                               fieldsWriter.SkipDocument();
+                               lastDocID++;
+                       }
+               }
+               
+               internal void  FinishDocument(PerDoc perDoc)
+               {
+                       lock (this)
+                       {
+                               System.Diagnostics.Debug.Assert(docWriter.writer.TestPoint("StoredFieldsWriter.finishDocument start"));
+                               InitFieldsWriter();
+                               
+                               Fill(perDoc.docID);
+                               
+                               // Append stored fields to the real FieldsWriter:
+                               fieldsWriter.FlushDocument(perDoc.numStoredFields, perDoc.fdt);
+                               lastDocID++;
+                               perDoc.Reset();
+                               Free(perDoc);
+                               System.Diagnostics.Debug.Assert(docWriter.writer.TestPoint("StoredFieldsWriter.finishDocument end"));
+                       }
+               }
+               
+               public bool FreeRAM()
+               {
+                       return false;
+               }
+               
+               internal void  Free(PerDoc perDoc)
+               {
+                       lock (this)
+                       {
+                               System.Diagnostics.Debug.Assert(freeCount < docFreeList.Length);
+                               System.Diagnostics.Debug.Assert(0 == perDoc.numStoredFields);
+                               System.Diagnostics.Debug.Assert(0 == perDoc.fdt.Length());
+                               System.Diagnostics.Debug.Assert(0 == perDoc.fdt.GetFilePointer());
+                               docFreeList[freeCount++] = perDoc;
+                       }
+               }
+               
+               internal class PerDoc:DocumentsWriter.DocWriter
+               {
+                       public PerDoc(StoredFieldsWriter enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(StoredFieldsWriter enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+				buffer = enclosingInstance.docWriter.NewPerDocBuffer();
+				fdt = new RAMOutputStream(buffer);
+                       }
+                       private StoredFieldsWriter enclosingInstance;
+                       public StoredFieldsWriter Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+
+			internal DocumentsWriter.PerDocBuffer buffer;
+			internal RAMOutputStream fdt;
+                       internal int numStoredFields;
+                       
+                       internal void  Reset()
+                       {
+                               fdt.Reset();
+				buffer.Recycle();
+                               numStoredFields = 0;
+                       }
+                       
+                       public override void  Abort()
+                       {
+                               Reset();
+                               Enclosing_Instance.Free(this);
+                       }
+                       
+                       public override long SizeInBytes()
+                       {
+				return buffer.GetSizeInBytes();
+                       }
+                       
+                       public override void  Finish()
+                       {
+                               Enclosing_Instance.FinishDocument(this);
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/StoredFieldsWriterPerThread.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/StoredFieldsWriterPerThread.cs
new file mode 100644 (file)
index 0000000..a927454
--- /dev/null
@@ -0,0 +1,94 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Fieldable = Mono.Lucene.Net.Documents.Fieldable;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class StoredFieldsWriterPerThread
+       {
+               
+               internal FieldsWriter localFieldsWriter;
+               internal StoredFieldsWriter storedFieldsWriter;
+               internal DocumentsWriter.DocState docState;
+               
+               internal StoredFieldsWriter.PerDoc doc;
+               
+               public StoredFieldsWriterPerThread(DocumentsWriter.DocState docState, StoredFieldsWriter storedFieldsWriter)
+               {
+                       this.storedFieldsWriter = storedFieldsWriter;
+                       this.docState = docState;
+                       localFieldsWriter = new FieldsWriter((IndexOutput) null, (IndexOutput) null, storedFieldsWriter.fieldInfos);
+               }
+               
+               public void  StartDocument()
+               {
+                       if (doc != null)
+                       {
+                               // Only happens if previous document hit non-aborting
+                               // exception while writing stored fields into
+                               // localFieldsWriter:
+                               doc.Reset();
+                               doc.docID = docState.docID;
+                       }
+               }
+               
+               public void  AddField(Fieldable field, FieldInfo fieldInfo)
+               {
+                       if (doc == null)
+                       {
+                               doc = storedFieldsWriter.GetPerDoc();
+                               doc.docID = docState.docID;
+                               localFieldsWriter.SetFieldsStream(doc.fdt);
+                               System.Diagnostics.Debug.Assert(doc.numStoredFields == 0, "doc.numStoredFields=" + doc.numStoredFields);
+                               System.Diagnostics.Debug.Assert(0 == doc.fdt.Length());
+                               System.Diagnostics.Debug.Assert(0 == doc.fdt.GetFilePointer());
+                       }
+                       
+                       localFieldsWriter.WriteField(fieldInfo, field);
+                       System.Diagnostics.Debug.Assert(docState.TestPoint("StoredFieldsWriterPerThread.processFields.writeField"));
+                       doc.numStoredFields++;
+               }
+               
+               public DocumentsWriter.DocWriter FinishDocument()
+               {
+                       // If there were any stored fields in this doc, doc will
+                       // be non-null; else it's null.
+                       try
+                       {
+                               return doc;
+                       }
+                       finally
+                       {
+                               doc = null;
+                       }
+               }
+               
+               public void  Abort()
+               {
+                       if (doc != null)
+                       {
+                               doc.Abort();
+                               doc = null;
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/Term.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/Term.cs
new file mode 100644 (file)
index 0000000..3a8d7e7
--- /dev/null
@@ -0,0 +1,179 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using StringHelper = Mono.Lucene.Net.Util.StringHelper;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+	/// <summary>A Term represents a word from text.  This is the unit of search.  It is
+	/// composed of two elements: the text of the word, as a string, and the name of
+	/// the field that the text occurred in, an interned string.
+	/// Note that terms may represent not only words from text fields, but also
+	/// things like dates, email addresses, URLs, etc.
+	/// </summary>
+       
+       [Serializable]
+    public sealed class Term : System.IComparable
+       {
+               internal System.String field;
+               internal System.String text;
+               
+               /// <summary>Constructs a Term with the given field and text.
+               /// <p/>Note that a null field or null text value results in undefined
+               /// behavior for most Lucene APIs that accept a Term parameter. 
+               /// </summary>
+               public Term(System.String fld, System.String txt)
+               {
+                       field = StringHelper.Intern(fld);
+                       text = txt;
+               }
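+		
+		// A brief usage sketch (hedged): "reader" is a hypothetical IndexReader, and
+		// IndexReader.TermDocs(Term) is assumed from upstream Lucene.
+		//
+		//   Term t = new Term("contents", "lucene");
+		//   TermDocs docs = reader.TermDocs(t); // enumerate <doc, freq> pairs for t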
+               
+		/// <summary>Constructs a Term with the given field and empty text.
+		/// This serves two purposes: 1) reuse of a Term with the same field, and
+		/// 2) use as a pattern for a query.
+		/// </summary>
+		/// <param name="fld">The field name
+		/// </param>
+               public Term(System.String fld):this(fld, "", true)
+               {
+               }
+               
+               internal Term(System.String fld, System.String txt, bool intern)
+               {
+			field = intern ? StringHelper.Intern(fld) : fld; // field names are interned, unless already known to be
+			text = txt;
+               }
+               
+               /// <summary>Returns the field of this term, an interned string.   The field indicates
+               /// the part of a document which this term came from. 
+               /// </summary>
+               public System.String Field()
+               {
+                       return field;
+               }
+               
+               /// <summary>Returns the text of this term.  In the case of words, this is simply the
+               /// text of the word.  In the case of dates and other types, this is an
+               /// encoding of the object as a string.  
+               /// </summary>
+               public System.String Text()
+               {
+                       return text;
+               }
+               
+               /// <summary> Optimized construction of new Terms by reusing same field as this Term
+               /// - avoids field.intern() overhead 
+               /// </summary>
+               /// <param name="text">The text of the new term (field is implicitly same as this Term instance)
+               /// </param>
+               /// <returns> A new Term
+               /// </returns>
+               public Term CreateTerm(System.String text)
+               {
+                       return new Term(field, text, false);
+               }
+               
+               public  override bool Equals(System.Object obj)
+               {
+                       if (this == obj)
+                               return true;
+                       if (obj == null)
+                               return false;
+                       if (GetType() != obj.GetType())
+                               return false;
+                       Term other = (Term) obj;
+                       if (field == null)
+                       {
+                               if (other.field != null)
+                                       return false;
+                       }
+                       else if (!field.Equals(other.field))
+                               return false;
+                       if (text == null)
+                       {
+                               if (other.text != null)
+                                       return false;
+                       }
+                       else if (!text.Equals(other.text))
+                               return false;
+                       return true;
+               }
+               
+               public override int GetHashCode()
+               {
+                       int prime = 31;
+                       int result = 1;
+                       result = prime * result + ((field == null)?0:field.GetHashCode());
+                       result = prime * result + ((text == null)?0:text.GetHashCode());
+                       return result;
+               }
+               
+               public int CompareTo(System.Object other)
+               {
+                       return CompareTo((Term) other);
+               }
+               
+               /// <summary>Compares two terms, returning a negative integer if this
+               /// term belongs before the argument, zero if this term is equal to the
+               /// argument, and a positive integer if this term belongs after the argument.
+               /// The ordering of terms is first by field, then by text.
+               /// </summary>
+               public int CompareTo(Term other)
+               {
+			if ((System.Object) field == (System.Object) other.field) // fields are interned: a reference comparison suffices
+				return String.CompareOrdinal(text, other.text);
+                       else
+                               return String.CompareOrdinal(field, other.field);
+               }
+               
+               /// <summary>Resets the field and text of a Term. </summary>
+               internal void  Set(System.String fld, System.String txt)
+               {
+                       field = fld;
+                       text = txt;
+               }
+               
+               public override System.String ToString()
+               {
+                       return field + ":" + text;
+               }
+               
+		/// <summary>Re-interns the field name after deserialization (the Java
+		/// original did this in readObject()). </summary>
+		[System.Runtime.Serialization.OnDeserialized]
+		internal void OnDeserialized(System.Runtime.Serialization.StreamingContext context)
+		{
+			field = StringHelper.Intern(field);
+		}
+
+        public System.String text_ForNUnit
+        {
+            get { return text; }
+        }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermBuffer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermBuffer.cs
new file mode 100644 (file)
index 0000000..cb29d30
--- /dev/null
@@ -0,0 +1,166 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+using UnicodeUtil = Mono.Lucene.Net.Util.UnicodeUtil;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class TermBuffer : System.ICloneable
+       {
+               
+               private System.String field;
+               private Term term; // cached
+		private bool preUTF8Strings; // true if strings are stored in the modified UTF-8 encoding (pre LUCENE-510)
+		private bool dirty; // true if text was set externally (i.e. not read via UTF-8 bytes)
+               
+               private UnicodeUtil.UTF16Result text = new UnicodeUtil.UTF16Result();
+               private UnicodeUtil.UTF8Result bytes = new UnicodeUtil.UTF8Result();
+               
+               public int CompareTo(TermBuffer other)
+               {
+			if ((System.Object) field == (System.Object) other.field) // fields are interned: a reference comparison suffices
+				return CompareChars(text.result, text.length, other.text.result, other.text.length);
+                       else
+                               return String.CompareOrdinal(field, other.field);
+               }
+               
+               private static int CompareChars(char[] chars1, int len1, char[] chars2, int len2)
+               {
+                       int end = len1 < len2?len1:len2;
+                       for (int k = 0; k < end; k++)
+                       {
+                               char c1 = chars1[k];
+                               char c2 = chars2[k];
+                               if (c1 != c2)
+                               {
+                                       return c1 - c2;
+                               }
+                       }
+                       return len1 - len2;
+               }
+               
+               /// <summary>Call this if the IndexInput passed to {@link #read}
+               /// stores terms in the "modified UTF8" (pre LUCENE-510)
+               /// format. 
+               /// </summary>
+               internal void  SetPreUTF8Strings()
+               {
+                       preUTF8Strings = true;
+               }
+               
+               public void  Read(IndexInput input, FieldInfos fieldInfos)
+               {
+                       this.term = null; // invalidate cache
+                       int start = input.ReadVInt();
+                       int length = input.ReadVInt();
+                       int totalLength = start + length;
+                       if (preUTF8Strings)
+                       {
+                               text.SetLength(totalLength);
+                               input.ReadChars(text.result, start, length);
+                       }
+                       else
+                       {
+                               
+                               if (dirty)
+                               {
+                                       // Fully convert all bytes since bytes is dirty
+                                       UnicodeUtil.UTF16toUTF8(text.result, 0, text.length, bytes);
+                                       bytes.SetLength(totalLength);
+                                       input.ReadBytes(bytes.result, start, length);
+                                       UnicodeUtil.UTF8toUTF16(bytes.result, 0, totalLength, text);
+                                       dirty = false;
+                               }
+                               else
+                               {
+                                       // Incrementally convert only the UTF8 bytes that are new:
+                                       bytes.SetLength(totalLength);
+                                       input.ReadBytes(bytes.result, start, length);
+                                       UnicodeUtil.UTF8toUTF16(bytes.result, start, length, text);
+                               }
+                       }
+                       this.field = fieldInfos.FieldName(input.ReadVInt());
+               }
+               
+               public void  Set(Term term)
+               {
+                       if (term == null)
+                       {
+                               Reset();
+                               return ;
+                       }
+                       System.String termText = term.Text();
+                       int termLen = termText.Length;
+                       text.SetLength(termLen);
+                       SupportClass.TextSupport.GetCharsFromString(termText, 0, termLen, text.result, 0);
+                       dirty = true;
+                       field = term.Field();
+                       this.term = term;
+               }
+               
+               public void  Set(TermBuffer other)
+               {
+                       text.CopyText(other.text);
+                       dirty = true;
+                       field = other.field;
+                       term = other.term;
+               }
+               
+               public void  Reset()
+               {
+                       field = null;
+                       text.SetLength(0);
+                       term = null;
+                       dirty = true;
+               }
+               
+               public Term ToTerm()
+               {
+			if (field == null) // unset
+				return null;
+                       
+                       if (term == null)
+                               term = new Term(field, new System.String(text.result, 0, text.length), false);
+                       
+                       return term;
+               }
+               
+               public System.Object Clone()
+               {
+                       TermBuffer clone = null;
+                       try
+                       {
+                               clone = (TermBuffer) base.MemberwiseClone();
+                       }
+			catch (System.Exception)
+			{
+				// MemberwiseClone does not throw; nothing to recover here.
+			}
+                       
+                       clone.dirty = true;
+                       clone.bytes = new UnicodeUtil.UTF8Result();
+                       clone.text = new UnicodeUtil.UTF16Result();
+                       clone.text.CopyText(text);
+                       return clone;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermDocs.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermDocs.cs
new file mode 100644 (file)
index 0000000..cfec6ee
--- /dev/null
@@ -0,0 +1,87 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>TermDocs provides an interface for enumerating &lt;document, frequency&gt;
+       /// pairs for a term.  <p/> The document portion names each document containing
+       /// the term.  Documents are indicated by number.  The frequency portion gives
+       /// the number of times the term occurred in each document.  <p/> The pairs are
+       /// ordered by document number.
+       /// </summary>
+       /// <seealso cref="IndexReader.TermDocs()">
+       /// </seealso>
+       
+       public interface TermDocs
+       {
+               /// <summary>Sets this to the data for a term.
+               /// The enumeration is reset to the start of the data for this term.
+               /// </summary>
+               void  Seek(Term term);
+               
+               /// <summary>Sets this to the data for the current term in a {@link TermEnum}.
+               /// This may be optimized in some implementations.
+               /// </summary>
+               void  Seek(TermEnum termEnum);
+               
+               /// <summary>Returns the current document number.  <p/> This is invalid until {@link
+               /// #Next()} is called for the first time.
+               /// </summary>
+               int Doc();
+               
+               /// <summary>Returns the frequency of the term within the current document.  <p/> This
+               /// is invalid until {@link #Next()} is called for the first time.
+               /// </summary>
+               int Freq();
+               
+               /// <summary>Moves to the next pair in the enumeration.  <p/> Returns true iff there is
+               /// such a next pair in the enumeration. 
+               /// </summary>
+               bool Next();
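+		
+		// Typical consumption sketch (hedged): "reader" is a hypothetical IndexReader
+		// and Use(...) stands in for caller code.
+		//
+		//   TermDocs td = reader.TermDocs(new Term("contents", "lucene"));
+		//   try {
+		//       while (td.Next())
+		//           Use(td.Doc(), td.Freq());
+		//   } finally { td.Close(); }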
+               
+               /// <summary>Attempts to read multiple entries from the enumeration, up to length of
+               /// <i>docs</i>.  Document numbers are stored in <i>docs</i>, and term
+               /// frequencies are stored in <i>freqs</i>.  The <i>freqs</i> array must be as
+               /// long as the <i>docs</i> array.
+               /// 
+               /// <p/>Returns the number of entries read.  Zero is only returned when the
+               /// stream has been exhausted.  
+               /// </summary>
+               int Read(int[] docs, int[] freqs);
+               
+               /// <summary>Skips entries to the first beyond the current whose document number is
+               /// greater than or equal to <i>target</i>. <p/>Returns true iff there is such
+               /// an entry.  <p/>Behaves as if written: <pre>
+		/// bool SkipTo(int target) {
+		///     do {
+		///         if (!Next())
+		///             return false;
+		///     } while (target > Doc());
+		///     return true;
+		/// }
+               /// </pre>
+               /// Some implementations are considerably more efficient than that.
+               /// </summary>
+               bool SkipTo(int target);
+               
+               /// <summary>Frees associated resources. </summary>
+               void  Close();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermEnum.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermEnum.cs
new file mode 100644 (file)
index 0000000..a13f883
--- /dev/null
@@ -0,0 +1,72 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>Abstract class for enumerating terms.
+       /// <p/>Term enumerations are always ordered by Term.compareTo().  Each term in
+       /// the enumeration is greater than all that precede it.  
+       /// </summary>
+       
+       public abstract class TermEnum
+       {
+               /// <summary>Increments the enumeration to the next element.  True if one exists.</summary>
+               public abstract bool Next();
+               
+               /// <summary>Returns the current Term in the enumeration.</summary>
+               public abstract Term Term();
+               
+               /// <summary>Returns the docFreq of the current Term in the enumeration.</summary>
+               public abstract int DocFreq();
+               
+               /// <summary>Closes the enumeration to further activity, freeing resources. </summary>
+               public abstract void  Close();
+               
+               /// <summary>Skips terms to the first beyond the current whose value is
+               /// greater or equal to <i>target</i>. <p/>Returns true iff there is such
+               /// an entry.  <p/>Behaves as if written: <pre>
+		/// public virtual bool SkipTo(Term target) {
+		///     do {
+		///         if (!Next())
+		///             return false;
+		///     } while (target.CompareTo(Term()) > 0);
+		///     return true;
+		/// }
+               /// </pre>
+               /// Some implementations *could* be considerably more efficient than a linear scan.
+               /// Check the implementation to be sure.
+               /// </summary>
+               /// <deprecated> This method is not performant and will be removed in Lucene 3.0.
+               /// Use {@link IndexReader#Terms(Term)} to create a new TermEnum positioned at a
+               /// given term.
+               /// </deprecated>
+		[Obsolete("This method is not performant and will be removed in Lucene 3.0. Use IndexReader.Terms(Term) to create a new TermEnum positioned at a given term.")]
+               public virtual bool SkipTo(Term target)
+               {
+                       do 
+                       {
+                               if (!Next())
+                                       return false;
+                       }
+                       while (target.CompareTo(Term()) > 0);
+                       return true;
+               }
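+		
+		// Enumeration sketch (hedged): "reader" is a hypothetical IndexReader,
+		// IndexReader.Terms(Term) is assumed from upstream Lucene, and Use(...)
+		// stands in for caller code.
+		//
+		//   TermEnum te = reader.Terms(new Term("contents", ""));
+		//   try {
+		//       do {
+		//           Term t = te.Term();
+		//           if (t == null || t.Field() != "contents") break;
+		//           Use(t, te.DocFreq());
+		//       } while (te.Next());
+		//   } finally { te.Close(); }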
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermFreqVector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermFreqVector.cs
new file mode 100644 (file)
index 0000000..a7016e7
--- /dev/null
@@ -0,0 +1,75 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+	/// <summary>Provides access to the stored term vector of
+	/// a document field.  The vector consists of the name of the field, an array of the terms that occur in the field of the
+	/// {@link Mono.Lucene.Net.Documents.Document}, and a parallel array of frequencies.  Thus, getTermFrequencies()[5] corresponds to the
+	/// frequency of getTerms()[5], assuming the field contains at least six terms.
+	/// </summary>
+       public interface TermFreqVector
+       {
+               /// <summary> The {@link Mono.Lucene.Net.Documents.Fieldable} name. </summary>
+               /// <returns> The name of the field this vector is associated with.
+               /// 
+               /// </returns>
+               System.String GetField();
+               
+               /// <returns> The number of terms in the term vector.
+               /// </returns>
+               int Size();
+               
+               /// <returns> An Array of term texts in ascending order.
+               /// </returns>
+               System.String[] GetTerms();
+               
+               
+               /// <summary>Array of term frequencies. Locations of the array correspond one to one
+               /// to the terms in the array obtained from <code>getTerms</code>
+               /// method. Each location in the array contains the number of times this
+               /// term occurs in the document or the document field.
+               /// </summary>
+               int[] GetTermFrequencies();
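+		
+		// Parallel-array access sketch (hedged): "tfv" is a TermFreqVector obtained
+		// elsewhere, e.g. from an IndexReader, and Use(...) stands in for caller code.
+		//
+		//   System.String[] terms = tfv.GetTerms();
+		//   int[] freqs = tfv.GetTermFrequencies();
+		//   for (int i = 0; i < tfv.Size(); i++)
+		//       Use(terms[i], freqs[i]);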
+               
+               
+		/// <summary>Returns the index in the terms array, as returned from
+		/// <code>getTerms</code>, at which the specified <code>term</code> appears.
+		/// If the term does not appear in the array, returns -1.
+		/// </summary>
+               int IndexOf(System.String term);
+               
+               
+		/// <summary>Just like <code>indexOf(String)</code> but searches for a number of terms
+               /// at the same time. Returns an array that has the same size as the number
+               /// of terms searched for, each slot containing the result of searching for
+               /// that term number.
+               /// 
+               /// </summary>
+               /// <param name="terms">array containing terms to look for
+               /// </param>
+               /// <param name="start">index in the array where the list of terms starts
+               /// </param>
+               /// <param name="len">the number of terms in the list
+               /// </param>
+               int[] IndexesOf(System.String[] terms, int start, int len);
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermInfo.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermInfo.cs
new file mode 100644 (file)
index 0000000..b1b4412
--- /dev/null
@@ -0,0 +1,69 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>A TermInfo is the record of information stored for a term.</summary>
+       
+       sealed class TermInfo
+       {
+               /// <summary>The number of documents which contain the term. </summary>
+               internal int docFreq = 0;
+               
+		/// <summary>Pointer into the frequency postings (.frq) file for this term. </summary>
+		internal long freqPointer = 0;
+		/// <summary>Pointer into the positions (.prx) file for this term. </summary>
+		internal long proxPointer = 0;
+		/// <summary>Offset of this term's skip data within the frequency postings. </summary>
+		internal int skipOffset;
+               
+               internal TermInfo()
+               {
+               }
+               
+               internal TermInfo(int df, long fp, long pp)
+               {
+                       docFreq = df;
+                       freqPointer = fp;
+                       proxPointer = pp;
+               }
+               
+               internal TermInfo(TermInfo ti)
+               {
+                       docFreq = ti.docFreq;
+                       freqPointer = ti.freqPointer;
+                       proxPointer = ti.proxPointer;
+                       skipOffset = ti.skipOffset;
+               }
+               
+               internal void  Set(int docFreq, long freqPointer, long proxPointer, int skipOffset)
+               {
+                       this.docFreq = docFreq;
+                       this.freqPointer = freqPointer;
+                       this.proxPointer = proxPointer;
+                       this.skipOffset = skipOffset;
+               }
+               
+               internal void  Set(TermInfo ti)
+               {
+                       docFreq = ti.docFreq;
+                       freqPointer = ti.freqPointer;
+                       proxPointer = ti.proxPointer;
+                       skipOffset = ti.skipOffset;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermInfosReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermInfosReader.cs
new file mode 100644 (file)
index 0000000..a331eeb
--- /dev/null
@@ -0,0 +1,319 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Directory = Mono.Lucene.Net.Store.Directory;
+using CloseableThreadLocal = Mono.Lucene.Net.Util.CloseableThreadLocal;
+using SimpleLRUCache = Mono.Lucene.Net.Util.Cache.SimpleLRUCache;
+
+namespace Mono.Lucene.Net.Index
+{
+
+	/// <summary>This stores a monotonically increasing set of &lt;Term, TermInfo&gt; pairs in a
+	/// Directory.  Pairs are accessed either by Term or by ordinal position in the
+	/// set.
+	/// </summary>
+       
+       sealed class TermInfosReader
+       {
+               private Directory directory;
+               private System.String segment;
+               private FieldInfos fieldInfos;
+               
+               private CloseableThreadLocal threadResources = new CloseableThreadLocal();
+               private SegmentTermEnum origEnum;
+               private long size;
+               
+               private Term[] indexTerms;
+               private TermInfo[] indexInfos;
+               private long[] indexPointers;
+               
+               private int totalIndexInterval;
+               
+               private const int DEFAULT_CACHE_SIZE = 1024;
+               
+               /// <summary> Per-thread resources managed by ThreadLocal</summary>
+               private sealed class ThreadResources
+               {
+                       internal SegmentTermEnum termEnum;
+                       
+                       // Used for caching the least recently looked-up Terms
+                       internal Mono.Lucene.Net.Util.Cache.Cache termInfoCache;
+               }
+               
+               internal TermInfosReader(Directory dir, System.String seg, FieldInfos fis, int readBufferSize, int indexDivisor)
+               {
+                       bool success = false;
+                       
+                       if (indexDivisor < 1 && indexDivisor != - 1)
+                       {
+                               throw new System.ArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
+                       }
+                       
+                       try
+                       {
+                               directory = dir;
+                               segment = seg;
+                               fieldInfos = fis;
+                               
+                               origEnum = new SegmentTermEnum(directory.OpenInput(segment + "." + IndexFileNames.TERMS_EXTENSION, readBufferSize), fieldInfos, false);
+                               size = origEnum.size;
+                               
+                               
+                               if (indexDivisor != - 1)
+                               {
+                                       // Load terms index
+                                       totalIndexInterval = origEnum.indexInterval * indexDivisor;
+                                       SegmentTermEnum indexEnum = new SegmentTermEnum(directory.OpenInput(segment + "." + IndexFileNames.TERMS_INDEX_EXTENSION, readBufferSize), fieldInfos, true);
+                                       
+                                       try
+                                       {
+                                               int indexSize = 1 + ((int) indexEnum.size - 1) / indexDivisor; // otherwise read index
+                                               
+                                               indexTerms = new Term[indexSize];
+                                               indexInfos = new TermInfo[indexSize];
+                                               indexPointers = new long[indexSize];
+                                               
+                                               for (int i = 0; indexEnum.Next(); i++)
+                                               {
+                                                       indexTerms[i] = indexEnum.Term();
+                                                       indexInfos[i] = indexEnum.TermInfo();
+                                                       indexPointers[i] = indexEnum.indexPointer;
+                                                       
+                                                       for (int j = 1; j < indexDivisor; j++)
+                                                               if (!indexEnum.Next())
+                                                                       break;
+                                               }
+                                       }
+                                       finally
+                                       {
+                                               indexEnum.Close();
+                                       }
+                               }
+                               else
+                               {
+                                       // Do not load terms index:
+                                       totalIndexInterval = - 1;
+                                       indexTerms = null;
+                                       indexInfos = null;
+                                       indexPointers = null;
+                               }
+                               success = true;
+                       }
+                       finally
+                       {
+                               // With lock-less commits, it's entirely possible (and
+                               // fine) to hit a FileNotFound exception above. In
+                               // this case, we want to explicitly close any subset
+                               // of things that were opened so that we don't have to
+                               // wait for a GC to do so.
+                               if (!success)
+                               {
+                                       Close();
+                               }
+                       }
+               }
+               
+               public int GetSkipInterval()
+               {
+                       return origEnum.skipInterval;
+               }
+               
+               public int GetMaxSkipLevels()
+               {
+                       return origEnum.maxSkipLevels;
+               }
+               
+               internal void  Close()
+               {
+                       if (origEnum != null)
+                               origEnum.Close();
+                       threadResources.Close();
+               }
+               
+               /// <summary>Returns the number of term/value pairs in the set. </summary>
+               internal long Size()
+               {
+                       return size;
+               }
+               
+               private ThreadResources GetThreadResources()
+               {
+                       ThreadResources resources = (ThreadResources) threadResources.Get();
+                       if (resources == null)
+                       {
+                               resources = new ThreadResources();
+                               resources.termEnum = Terms();
+                               // Cache does not have to be thread-safe; it is only used by one thread at a time
+                               resources.termInfoCache = new SimpleLRUCache(DEFAULT_CACHE_SIZE);
+                               threadResources.Set(resources);
+                       }
+                       return resources;
+               }
+               
+               
+               /// <summary>Returns the offset of the greatest index entry which is less than or equal to term.</summary>
+               private int GetIndexOffset(Term term)
+               {
+                       int lo = 0; // binary search indexTerms[]
+                       int hi = indexTerms.Length - 1;
+                       
+                       while (hi >= lo)
+                       {
+                               int mid = SupportClass.Number.URShift((lo + hi), 1);
+                               int delta = term.CompareTo(indexTerms[mid]);
+                               if (delta < 0)
+                                       hi = mid - 1;
+                               else if (delta > 0)
+                                       lo = mid + 1;
+                               else
+                                       return mid;
+                       }
+                       return hi;
+               }
+               
+               private void  SeekEnum(SegmentTermEnum enumerator, int indexOffset)
+               {
+                       enumerator.Seek(indexPointers[indexOffset], ((long)indexOffset * totalIndexInterval) - 1, indexTerms[indexOffset], indexInfos[indexOffset]);
+               }
+               
+               /// <summary>Returns the TermInfo for a Term in the set, or null. </summary>
+               internal TermInfo Get(Term term)
+               {
+                       return Get(term, true);
+               }
+               
+               /// <summary>Returns the TermInfo for a Term in the set, or null. </summary>
+               private TermInfo Get(Term term, bool useCache)
+               {
+                       if (size == 0)
+                               return null;
+                       
+                       EnsureIndexIsRead();
+                       
+                       TermInfo ti;
+                       ThreadResources resources = GetThreadResources();
+                       Mono.Lucene.Net.Util.Cache.Cache cache = null;
+                       
+                       if (useCache)
+                       {
+                               cache = resources.termInfoCache;
+                               // check the cache first if the term was recently looked up
+                               ti = (TermInfo) cache.Get(term);
+                               if (ti != null)
+                               {
+                                       return ti;
+                               }
+                       }
+                       
+                       // optimize sequential access: first try scanning cached enum w/o seeking
+                       SegmentTermEnum enumerator = resources.termEnum;
+                       if (enumerator.Term() != null && ((enumerator.Prev() != null && term.CompareTo(enumerator.Prev()) > 0) || term.CompareTo(enumerator.Term()) >= 0))
+                       {
+                               int enumOffset = (int) (enumerator.position / totalIndexInterval) + 1;
+                               if (indexTerms.Length == enumOffset || term.CompareTo(indexTerms[enumOffset]) < 0)
+                               {
+                                       // no need to seek
+                                       
+                                       int numScans = enumerator.ScanTo(term);
+                                       if (enumerator.Term() != null && term.CompareTo(enumerator.Term()) == 0)
+                                       {
+                                               ti = enumerator.TermInfo();
+                                               if (cache != null && numScans > 1)
+                                               {
+                                                       // we only want to put this TermInfo into the cache if
+                                                       // scanEnum skipped more than one dictionary entry.
+                                                       // This prevents RangeQueries or WildcardQueries from
+                                                       // wiping out the cache when they iterate over large
+                                                       // numbers of terms in order
+                                                       cache.Put(term, ti);
+                                               }
+                                       }
+                                       else
+                                       {
+                                               ti = null;
+                                       }
+                                       
+                                       return ti;
+                               }
+                       }
+                       
+                       // random-access: must seek
+                       SeekEnum(enumerator, GetIndexOffset(term));
+                       enumerator.ScanTo(term);
+                       if (enumerator.Term() != null && term.CompareTo(enumerator.Term()) == 0)
+                       {
+                               ti = enumerator.TermInfo();
+                               if (cache != null)
+                               {
+                                       cache.Put(term, ti);
+                               }
+                       }
+                       else
+                       {
+                               ti = null;
+                       }
+                       return ti;
+               }
+                                               
+               private void  EnsureIndexIsRead()
+               {
+                       if (indexTerms == null)
+                       {
+                               throw new System.SystemException("terms index was not loaded when this reader was created");
+                       }
+               }
+               
+               /// <summary>Returns the position of a Term in the set or -1. </summary>
+               internal long GetPosition(Term term)
+               {
+                       if (size == 0)
+                               return - 1;
+                       
+                       EnsureIndexIsRead();
+                       int indexOffset = GetIndexOffset(term);
+                       
+                       SegmentTermEnum enumerator = GetThreadResources().termEnum;
+                       SeekEnum(enumerator, indexOffset);
+                       
+                       while (term.CompareTo(enumerator.Term()) > 0 && enumerator.Next())
+                       {
+                       }
+                       
+                       if (term.CompareTo(enumerator.Term()) == 0)
+                               return enumerator.position;
+                       else
+                               return - 1;
+               }
+               
+               /// <summary>Returns an enumeration of all the Terms and TermInfos in the set. </summary>
+               public SegmentTermEnum Terms()
+               {
+                       return (SegmentTermEnum) origEnum.Clone();
+               }
+               
+               /// <summary>Returns an enumeration of terms starting at or after the named term. </summary>
+               public SegmentTermEnum Terms(Term term)
+               {
+                       // don't use the cache in this call because we want to reposition the
+                       // enumeration
+                       Get(term, false);
+                       return (SegmentTermEnum) GetThreadResources().termEnum.Clone();
+               }
+       }
+}
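GetIndexOffset above is a floor-style binary search: when the term is not present, the loop exits with hi resting on the greatest index entry still less than the term, which is exactly the block SeekEnum needs to jump to before scanning forward. A minimal standalone sketch of the same pattern, assuming a plain sorted int[] (illustrative only, not part of this file):

    // Floor binary search: index of the greatest element <= key, or -1 if
    // key is smaller than every element.
    static int FloorIndex(int[] sorted, int key)
    {
        int lo = 0, hi = sorted.Length - 1;
        while (hi >= lo)
        {
            // Overflow-safe midpoint; mirrors SupportClass.Number.URShift.
            int mid = (int)((uint)(lo + hi) >> 1);
            if (key < sorted[mid])
                hi = mid - 1;
            else if (key > sorted[mid])
                lo = mid + 1;
            else
                return mid;
        }
        return hi; // the loop leaves hi just below the insertion point
    }

Returning hi instead of a "not found" sentinel is the point: the reader only needs the nearest preceding index term to seek close to the target.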
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermInfosWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermInfosWriter.cs
new file mode 100644 (file)
index 0000000..d60b617
--- /dev/null
@@ -0,0 +1,243 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Directory = Mono.Lucene.Net.Store.Directory;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+using UnicodeUtil = Mono.Lucene.Net.Util.UnicodeUtil;
+
+namespace Mono.Lucene.Net.Index
+{
+
+    /// <summary>This stores a monotonically increasing set of &lt;Term, TermInfo&gt; pairs in a
+       /// Directory.  A TermInfos can be written once, in order.  
+       /// </summary>
+       
+       sealed class TermInfosWriter
+       {
+               /// <summary>The file format version, a negative number. </summary>
+               public const int FORMAT = - 3;
+               
+               // Changed strings to true utf8 with length-in-bytes not
+               // length-in-chars
+               public const int FORMAT_VERSION_UTF8_LENGTH_IN_BYTES = - 4;
+               
+               // NOTE: always change this if you switch to a new format!
+               public static readonly int FORMAT_CURRENT = FORMAT_VERSION_UTF8_LENGTH_IN_BYTES;
+               
+               private FieldInfos fieldInfos;
+               private IndexOutput output;
+               private TermInfo lastTi = new TermInfo();
+               private long size;
+               
+               // TODO: the default values for these two parameters should be settable from
+               // IndexWriter.  However, once that's done, folks will start setting them to
+               // ridiculous values and complaining that things don't work well, as with
+               // mergeFactor.  So, let's wait until a number of folks find that alternate
+               // values work better.  Note that both of these values are stored in the
+               // segment, so that it's safe to change these w/o rebuilding all indexes.
+               
+               /// <summary>Expert: The fraction of terms in the "dictionary" which should be stored
+               /// in RAM.  Smaller values use more memory, but make searching slightly
+               /// faster, while larger values use less memory and make searching slightly
+               /// slower.  Searching is typically not dominated by dictionary lookup, so
+               /// tweaking this is rarely useful.
+               /// </summary>
+               internal int indexInterval = 128;
+               
+               /// <summary>Expert: The fraction of {@link TermDocs} entries stored in skip tables,
+               /// used to accelerate {@link TermDocs#SkipTo(int)}.  Larger values result in
+               /// smaller indexes, greater acceleration, but fewer accelerable cases, while
+               /// smaller values result in bigger indexes, less acceleration and more
+               /// accelerable cases. More detailed experiments would be useful here. 
+               /// </summary>
+               internal int skipInterval = 16;
+               
+               /// <summary>Expert: The maximum number of skip levels. Smaller values result in 
+               /// slightly smaller indexes, but slower skipping in big posting lists.
+               /// </summary>
+               internal int maxSkipLevels = 10;
+               
+               private long lastIndexPointer;
+               private bool isIndex;
+               private byte[] lastTermBytes = new byte[10];
+               private int lastTermBytesLength = 0;
+               private int lastFieldNumber = - 1;
+               
+               private TermInfosWriter other;
+               private UnicodeUtil.UTF8Result utf8Result = new UnicodeUtil.UTF8Result();
+               
+               internal TermInfosWriter(Directory directory, System.String segment, FieldInfos fis, int interval)
+               {
+                       Initialize(directory, segment, fis, interval, false);
+                       other = new TermInfosWriter(directory, segment, fis, interval, true);
+                       other.other = this;
+               }
+               
+               private TermInfosWriter(Directory directory, System.String segment, FieldInfos fis, int interval, bool isIndex)
+               {
+                       Initialize(directory, segment, fis, interval, isIndex);
+               }
+               
+               private void  Initialize(Directory directory, System.String segment, FieldInfos fis, int interval, bool isi)
+               {
+                       indexInterval = interval;
+                       fieldInfos = fis;
+                       isIndex = isi;
+                       output = directory.CreateOutput(segment + (isIndex?".tii":".tis"));
+                       output.WriteInt(FORMAT_CURRENT); // write format
+                       output.WriteLong(0); // leave space for size
+                       output.WriteInt(indexInterval); // write indexInterval
+                       output.WriteInt(skipInterval); // write skipInterval
+                       output.WriteInt(maxSkipLevels); // write maxSkipLevels
+                       System.Diagnostics.Debug.Assert(InitUTF16Results());
+               }
+               
+               internal void  Add(Term term, TermInfo ti)
+               {
+                       UnicodeUtil.UTF16toUTF8(term.text, 0, term.text.Length, utf8Result);
+                       Add(fieldInfos.FieldNumber(term.field), utf8Result.result, utf8Result.length, ti);
+               }
+               
+               // Currently used only by assert statements
+               internal UnicodeUtil.UTF16Result utf16Result1;
+               internal UnicodeUtil.UTF16Result utf16Result2;
+               
+               // Currently used only by assert statements
+               private bool InitUTF16Results()
+               {
+                       utf16Result1 = new UnicodeUtil.UTF16Result();
+                       utf16Result2 = new UnicodeUtil.UTF16Result();
+                       return true;
+               }
+               
+               // Currently used only by assert statement
+               private int CompareToLastTerm(int fieldNumber, byte[] termBytes, int termBytesLength)
+               {
+                       
+                       if (lastFieldNumber != fieldNumber)
+                       {
+                               int cmp = String.CompareOrdinal(fieldInfos.FieldName(lastFieldNumber), fieldInfos.FieldName(fieldNumber));
+                               // If there is a field named "" (empty string) then we
+                               // will get 0 on this comparison, yet, it's "OK".  But
+                               // it's not OK if two different field numbers map to
+                               // the same name.
+                               if (cmp != 0 || lastFieldNumber != - 1)
+                                       return cmp;
+                       }
+                       
+                       UnicodeUtil.UTF8toUTF16(lastTermBytes, 0, lastTermBytesLength, utf16Result1);
+                       UnicodeUtil.UTF8toUTF16(termBytes, 0, termBytesLength, utf16Result2);
+                       int len;
+                       if (utf16Result1.length < utf16Result2.length)
+                               len = utf16Result1.length;
+                       else
+                               len = utf16Result2.length;
+                       
+                       for (int i = 0; i < len; i++)
+                       {
+                               char ch1 = utf16Result1.result[i];
+                               char ch2 = utf16Result2.result[i];
+                               if (ch1 != ch2)
+                                       return ch1 - ch2;
+                       }
+                       return utf16Result1.length - utf16Result2.length;
+               }
+
+        /// <summary>Adds a new &lt;&lt;fieldNumber, termBytes&gt;, TermInfo&gt; pair to the set.
+               /// Term must be lexicographically greater than all previous Terms added.
+               /// TermInfo pointers must be positive and greater than all previous.
+               /// </summary>
+               internal void  Add(int fieldNumber, byte[] termBytes, int termBytesLength, TermInfo ti)
+               {
+                       
+                       System.Diagnostics.Debug.Assert(CompareToLastTerm(fieldNumber, termBytes, termBytesLength) < 0 ||
+                                       (isIndex && termBytesLength == 0 && lastTermBytesLength == 0), 
+                               "Terms are out of order: field=" + fieldInfos.FieldName(fieldNumber) + " (number " + fieldNumber + ")" + 
+                               " lastField=" + fieldInfos.FieldName(lastFieldNumber) + " (number " + lastFieldNumber + ")" + 
+                               " text=" + System.Text.Encoding.UTF8.GetString(termBytes, 0, termBytesLength) + " lastText=" + System.Text.Encoding.UTF8.GetString(lastTermBytes, 0, lastTermBytesLength));
+                       
+                       System.Diagnostics.Debug.Assert(ti.freqPointer >= lastTi.freqPointer, "freqPointer out of order (" + ti.freqPointer + " < " + lastTi.freqPointer + ")");
+                       System.Diagnostics.Debug.Assert(ti.proxPointer >= lastTi.proxPointer, "proxPointer out of order (" + ti.proxPointer + " < " + lastTi.proxPointer + ")");
+                       
+                       if (!isIndex && size % indexInterval == 0)
+                               other.Add(lastFieldNumber, lastTermBytes, lastTermBytesLength, lastTi); // add an index term
+                       
+                       WriteTerm(fieldNumber, termBytes, termBytesLength); // write term
+                       
+                       output.WriteVInt(ti.docFreq); // write doc freq
+                       output.WriteVLong(ti.freqPointer - lastTi.freqPointer); // write pointers
+                       output.WriteVLong(ti.proxPointer - lastTi.proxPointer);
+                       
+                       if (ti.docFreq >= skipInterval)
+                       {
+                               output.WriteVInt(ti.skipOffset);
+                       }
+                       
+                       if (isIndex)
+                       {
+                               output.WriteVLong(other.output.GetFilePointer() - lastIndexPointer);
+                               lastIndexPointer = other.output.GetFilePointer(); // write pointer
+                       }
+                       
+                       lastFieldNumber = fieldNumber;
+                       lastTi.Set(ti);
+                       size++;
+               }
+               
+               private void  WriteTerm(int fieldNumber, byte[] termBytes, int termBytesLength)
+               {
+                       
+                       // TODO: UTF16toUTF8 could tell us this prefix
+                       // Compute prefix in common with last term:
+                       int start = 0;
+                       int limit = termBytesLength < lastTermBytesLength?termBytesLength:lastTermBytesLength;
+                       while (start < limit)
+                       {
+                               if (termBytes[start] != lastTermBytes[start])
+                                       break;
+                               start++;
+                       }
+                       
+                       int length = termBytesLength - start;
+                       output.WriteVInt(start); // write shared prefix length
+                       output.WriteVInt(length); // write delta length
+                       output.WriteBytes(termBytes, start, length); // write delta bytes
+                       output.WriteVInt(fieldNumber); // write field num
+                       if (lastTermBytes.Length < termBytesLength)
+                       {
+                               byte[] newArray = new byte[(int) (termBytesLength * 1.5)];
+                               Array.Copy(lastTermBytes, 0, newArray, 0, start);
+                               lastTermBytes = newArray;
+                       }
+                       Array.Copy(termBytes, start, lastTermBytes, start, length);
+                       lastTermBytesLength = termBytesLength;
+               }
+               
+               /// <summary>Called to complete TermInfos creation. </summary>
+               internal void  Close()
+               {
+                       output.Seek(4); // write size after format
+                       output.WriteLong(size);
+                       output.Close();
+                       
+                       if (!isIndex)
+                               other.Close();
+               }
+       }
+}
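WriteTerm above front-codes the dictionary: each term is written as the length of the prefix it shares with the previous term, the length of the remaining suffix, and the suffix bytes only. A hedged sketch of the idea over strings (hypothetical helper, not part of this file):

    // Front coding: sorted neighbours share prefixes, so "apple" followed
    // by "apply" is stored as <4, 1, "y">.
    static void FrontCode(string prev, string next,
                          out int sharedPrefix, out string suffix)
    {
        int start = 0;
        int limit = prev.Length < next.Length ? prev.Length : next.Length;
        while (start < limit && prev[start] == next[start])
            start++;                    // count the shared prefix
        sharedPrefix = start;
        suffix = next.Substring(start); // only the delta gets written
    }

Because Add requires terms in lexicographic order, consecutive entries tend to share long prefixes and the .tis file stays compact.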
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermPositionVector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermPositionVector.cs
new file mode 100644 (file)
index 0000000..b6a978b
--- /dev/null
@@ -0,0 +1,50 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>Extends <code>TermFreqVector</code> to provide additional information about
+       /// positions in which each of the terms is found. A TermPositionVector does not
+       /// necessarily contain both positions and offsets, but at least one of these arrays exists.
+       /// </summary>
+       public interface TermPositionVector:TermFreqVector
+       {
+               
+               /// <summary>Returns an array of positions in which the term is found.
+               /// Terms are identified by the index at which their number appears in the
+               /// term String array obtained from the <code>indexOf</code> method.
+               /// May return null if positions have not been stored.
+               /// </summary>
+               int[] GetTermPositions(int index);
+               
+               /// <summary> Returns an array of TermVectorOffsetInfo in which the term is found.
+               /// May return null if offsets have not been stored.
+               /// 
+               /// </summary>
+               /// <seealso cref="Mono.Lucene.Net.Analysis.Token">
+               /// 
+               /// </seealso>
+               /// <param name="index">The position in the array to get the offsets from
+               /// </param>
+               /// <returns> An array of TermVectorOffsetInfo objects or the empty list
+               /// </returns>
+               TermVectorOffsetInfo[] GetOffsets(int index);
+       }
+}
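A hypothetical consumer sketch (IndexOf comes from the inherited TermFreqVector; the null checks follow the contract stated above, since only one of the two arrays is guaranteed to exist):

    static void PrintTerm(Mono.Lucene.Net.Index.TermPositionVector tpv, string term)
    {
        int idx = tpv.IndexOf(term);                 // index into the term array
        if (idx < 0)
            return;                                  // term not in this vector
        int[] positions = tpv.GetTermPositions(idx); // may be null
        Mono.Lucene.Net.Index.TermVectorOffsetInfo[] offsets = tpv.GetOffsets(idx); // may be null
        if (positions != null)
            System.Console.WriteLine(term + " at positions: " + string.Join(", ", positions));
    }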
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermPositions.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermPositions.cs
new file mode 100644 (file)
index 0000000..56fd153
--- /dev/null
@@ -0,0 +1,81 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> TermPositions provides an interface for enumerating the &lt;document,
+       /// frequency, &lt;position&gt;* &gt; tuples for a term.  <p/> The document and
+       /// frequency are the same as for a TermDocs.  The positions portion lists the ordinal
+       /// positions of each occurrence of a term in a document.
+       /// 
+       /// </summary>
+       /// <seealso cref="IndexReader.TermPositions()">
+       /// </seealso>
+       
+       public interface TermPositions:TermDocs
+       {
+               /// <summary>Returns the next position in the current document.  It is an error to call
+               /// this more than {@link #Freq()} times
+               /// without calling {@link #Next()}<p/> This is
+               /// invalid until {@link #Next()} is called for
+               /// the first time.
+               /// </summary>
+               int NextPosition();
+               
+               /// <summary> Returns the length of the payload at the current term position.
+               /// This is invalid until {@link #NextPosition()} is called for
+               /// the first time.<br/>
+               /// </summary>
+               /// <returns> length of the current payload in number of bytes
+               /// </returns>
+               int GetPayloadLength();
+               
+               /// <summary> Returns the payload data at the current term position.
+               /// This is invalid until {@link #NextPosition()} is called for
+               /// the first time.
+               /// This method must not be called more than once after each call
+               /// of {@link #NextPosition()}. However, payloads are loaded lazily,
+               /// so if the payload data for the current position is not needed,
+               /// this method may not be called at all for performance reasons.<br/>
+               /// 
+               /// </summary>
+               /// <param name="data">the array into which the data of this payload is to be
+               /// stored, if it is big enough; otherwise, a new byte[] array
+               /// is allocated for this purpose. 
+               /// </param>
+               /// <param name="offset">the offset in the array into which the data of this payload
+               /// is to be stored.
+               /// </param>
+               /// <returns> a byte[] array containing the data of this payload
+               /// </returns>
+               /// <throws>  IOException </throws>
+               byte[] GetPayload(byte[] data, int offset);
+               
+               /// <summary> Checks if a payload can be loaded at this position.
+               /// <p/>
+               /// Payloads can only be loaded once per call to 
+               /// {@link #NextPosition()}.
+               /// 
+               /// </summary>
+               /// <returns> true if there is a payload available at this position that can be loaded
+               /// </returns>
+               bool IsPayloadAvailable();
+       }
+}
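A hypothetical walk over the &lt;doc, freq, &lt;position&gt;*&gt; tuples for a single term (reader and term are assumed to exist; method names follow this port's conventions, so treat this as a sketch rather than a definitive usage):

    static void DumpPositions(Mono.Lucene.Net.Index.IndexReader reader,
                              Mono.Lucene.Net.Index.Term term)
    {
        Mono.Lucene.Net.Index.TermPositions tp = reader.TermPositions(term);
        try
        {
            while (tp.Next())            // advance to the next matching document
            {
                int freq = tp.Freq();    // occurrences within this document
                // Per the contract above: at most Freq() calls per document.
                for (int i = 0; i < freq; i++)
                    System.Console.WriteLine(tp.Doc() + ": " + tp.NextPosition());
            }
        }
        finally
        {
            tp.Close();                  // release the underlying streams
        }
    }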
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorEntry.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorEntry.cs
new file mode 100644 (file)
index 0000000..59bd50e
--- /dev/null
@@ -0,0 +1,114 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> Convenience class for holding TermVector information.</summary>
+       public class TermVectorEntry
+       {
+               private System.String field;
+               private System.String term;
+               private int frequency;
+               private TermVectorOffsetInfo[] offsets;
+               internal int[] positions;
+               
+               
+               public TermVectorEntry()
+               {
+               }
+               
+               public TermVectorEntry(System.String field, System.String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions)
+               {
+                       this.field = field;
+                       this.term = term;
+                       this.frequency = frequency;
+                       this.offsets = offsets;
+                       this.positions = positions;
+               }
+               
+               
+               public virtual System.String GetField()
+               {
+                       return field;
+               }
+               
+               public virtual int GetFrequency()
+               {
+                       return frequency;
+               }
+               
+               public virtual TermVectorOffsetInfo[] GetOffsets()
+               {
+                       return offsets;
+               }
+               
+               public virtual int[] GetPositions()
+               {
+                       return positions;
+               }
+               
+               public virtual System.String GetTerm()
+               {
+                       return term;
+               }
+               
+               //Keep package local
+               internal virtual void  SetFrequency(int frequency)
+               {
+                       this.frequency = frequency;
+               }
+               
+               internal virtual void  SetOffsets(TermVectorOffsetInfo[] offsets)
+               {
+                       this.offsets = offsets;
+               }
+               
+               internal virtual void  SetPositions(int[] positions)
+               {
+                       this.positions = positions;
+               }
+               
+               
+               public  override bool Equals(System.Object o)
+               {
+                       if (this == o)
+                               return true;
+                       if (o == null || GetType() != o.GetType())
+                               return false;
+                       
+                       TermVectorEntry that = (TermVectorEntry) o;
+                       
+                       if (term != null?!term.Equals(that.term):that.term != null)
+                               return false;
+                       
+                       return true;
+               }
+               
+               public override int GetHashCode()
+               {
+                       return (term != null?term.GetHashCode():0);
+               }
+               
+               public override System.String ToString()
+               {
+                       return "TermVectorEntry{" + "field='" + field + '\'' + ", term='" + term + '\'' + ", frequency=" + frequency + '}';
+               }
+       }
+}
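Note that Equals and GetHashCode above compare only the term text; field and frequency are ignored, so two entries for the same term in different fields are considered equal. A small illustration (the values are made up):

    var a = new Mono.Lucene.Net.Index.TermVectorEntry("title", "lucene", 3, null, null);
    var b = new Mono.Lucene.Net.Index.TermVectorEntry("body",  "lucene", 7, null, null);
    System.Console.WriteLine(a.Equals(b)); // True: only the term "lucene" is compared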
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorEntryFreqSortedComparator.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorEntryFreqSortedComparator.cs
new file mode 100644 (file)
index 0000000..dbbd612
--- /dev/null
@@ -0,0 +1,48 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> Compares {@link Mono.Lucene.Net.Index.TermVectorEntry}s first by frequency and then by
+       /// the term (case-sensitive)
+       /// 
+       /// 
+       /// </summary>
+       //public class TermVectorEntryFreqSortedComparator : System.Collections.IComparer
+    public class TermVectorEntryFreqSortedComparator : System.Collections.Generic.IComparer<System.Object>
+       {
+               public virtual int Compare(System.Object object_Renamed, System.Object object1)
+               {
+                       int result = 0;
+                       TermVectorEntry entry = (TermVectorEntry) object_Renamed;
+                       TermVectorEntry entry1 = (TermVectorEntry) object1;
+                       result = entry1.GetFrequency() - entry.GetFrequency();
+                       if (result == 0)
+                       {
+                               result = String.CompareOrdinal(entry.GetTerm(), entry1.GetTerm());
+                               if (result == 0)
+                               {
+                                       result = String.CompareOrdinal(entry.GetField(), entry1.GetField());
+                               }
+                       }
+                       return result;
+               }
+       }
+}
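A hypothetical usage sketch: the comparator sorts by descending frequency, breaking ties by term and then field (both ordinal). Because IComparer&lt;in T&gt; is contravariant on .NET 4 and later, the IComparer&lt;object&gt; above can sort a List&lt;TermVectorEntry&gt; directly:

    var entries = new System.Collections.Generic.List<Mono.Lucene.Net.Index.TermVectorEntry>
    {
        new Mono.Lucene.Net.Index.TermVectorEntry("body", "beta",  2, null, null),
        new Mono.Lucene.Net.Index.TermVectorEntry("body", "alpha", 5, null, null),
    };
    entries.Sort(new Mono.Lucene.Net.Index.TermVectorEntryFreqSortedComparator());
    // entries[0].GetTerm() == "alpha" - frequency 5 sorts before frequency 2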
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorMapper.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorMapper.cs
new file mode 100644 (file)
index 0000000..d12ff33
--- /dev/null
@@ -0,0 +1,114 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> The TermVectorMapper can be used to map Term Vectors into your own
+       /// structure instead of the parallel array structure used by
+       /// {@link Mono.Lucene.Net.Index.IndexReader#GetTermFreqVector(int,String)}.
+       /// <p/>
+       /// It is up to the implementation to make sure it is thread-safe.
+       /// 
+       /// 
+       /// 
+       /// </summary>
+       public abstract class TermVectorMapper
+       {
+               
+               private bool ignoringPositions;
+               private bool ignoringOffsets;
+               
+               
+               protected internal TermVectorMapper()
+               {
+               }
+               
+               /// <summary> </summary>
+               /// <param name="ignoringPositions">true if this mapper should tell Lucene to ignore positions even if they are stored
+               /// </param>
+               /// <param name="ignoringOffsets">similar to ignoringPositions
+               /// </param>
+               protected internal TermVectorMapper(bool ignoringPositions, bool ignoringOffsets)
+               {
+                       this.ignoringPositions = ignoringPositions;
+                       this.ignoringOffsets = ignoringOffsets;
+               }
+               
+               /// <summary> Tell the mapper what to expect with regard to field, number of terms, and offset and position storage.
+               /// This method will be called once before retrieving the vector for a field.
+               /// 
+               /// This method will be called before {@link #Map(String,int,TermVectorOffsetInfo[],int[])}.
+               /// </summary>
+               /// <param name="field">The field the vector is for
+               /// </param>
+               /// <param name="numTerms">The number of terms that need to be mapped
+               /// </param>
+               /// <param name="storeOffsets">true if the mapper should expect offset information
+               /// </param>
+               /// <param name="storePositions">true if the mapper should expect positions info
+               /// </param>
+               public abstract void  SetExpectations(System.String field, int numTerms, bool storeOffsets, bool storePositions);
+               /// <summary> Map the Term Vector information into your own structure</summary>
+               /// <param name="term">The term to add to the vector
+               /// </param>
+               /// <param name="frequency">The frequency of the term in the document
+               /// </param>
+               /// <param name="offsets">null if the offset is not specified, otherwise the offset into the field of the term
+               /// </param>
+               /// <param name="positions">null if the position is not specified, otherwise the position in the field of the term
+               /// </param>
+               public abstract void  Map(System.String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions);
+               
+               /// <summary> Indicate to Lucene that even if there are positions stored, this mapper is not interested in them and they
+               /// can be skipped over.  Derived classes should set this to true if they want to ignore positions.  The default
+               /// is false, meaning positions will be loaded if they are stored.
+               /// </summary>
+               /// <returns> false
+               /// </returns>
+               public virtual bool IsIgnoringPositions()
+               {
+                       return ignoringPositions;
+               }
+               
+               /// <summary> </summary>
+               /// <seealso cref="IsIgnoringPositions()"> Same principal as {@link #IsIgnoringPositions()}, but applied to offsets.  false by default.
+               /// </seealso>
+               /// <returns> false
+               /// </returns>
+               public virtual bool IsIgnoringOffsets()
+               {
+                       return ignoringOffsets;
+               }
+               
+               /// <summary> Passes down the index of the document whose term vector is currently being mapped,
+               /// once for each top level call to a term vector reader.
+               /// <p/>
+               /// Default implementation IGNORES the document number.  Override if your implementation needs the document number.
+               /// <p/> 
+               /// NOTE: Document numbers are internal to Lucene and subject to change depending on indexing operations.
+               /// 
+               /// </summary>
+               /// <param name="documentNumber">index of document currently being mapped
+               /// </param>
+               public virtual void  SetDocumentNumber(int documentNumber)
+               {
+               }
+       }
+}
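A minimal concrete mapper sketch (the class name and field are assumptions, not part of this codebase): collect term frequencies into a dictionary and ask Lucene to skip positions and offsets entirely:

    public class FrequencyOnlyMapper : Mono.Lucene.Net.Index.TermVectorMapper
    {
        public readonly System.Collections.Generic.Dictionary<string, int> Frequencies =
            new System.Collections.Generic.Dictionary<string, int>();

        // base(true, true): ignore positions and offsets even if stored.
        public FrequencyOnlyMapper() : base(true, true)
        {
        }

        public override void SetExpectations(string field, int numTerms,
                                             bool storeOffsets, bool storePositions)
        {
            // Nothing to pre-allocate for this simple mapper.
        }

        public override void Map(string term, int frequency,
                                 Mono.Lucene.Net.Index.TermVectorOffsetInfo[] offsets,
                                 int[] positions)
        {
            Frequencies[term] = frequency; // keep only the frequency
        }
    }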
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorOffsetInfo.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorOffsetInfo.cs
new file mode 100644 (file)
index 0000000..815c3e2
--- /dev/null
@@ -0,0 +1,106 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using System.Runtime.InteropServices;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary> The TermVectorOffsetInfo class holds the offset information for a Term in a {@link Mono.Lucene.Net.Index.TermPositionVector}.
+       /// This offset information is the character offset as set during the Analysis phase (and thus may not be the actual offset in the
+       /// original content).
+       /// </summary>
+       [Serializable]
+       public class TermVectorOffsetInfo
+       {
+               /// <summary> Convenience declaration when creating a {@link Mono.Lucene.Net.Index.TermPositionVector} that stores only position information.</summary>
+               [NonSerialized]
+               public static readonly TermVectorOffsetInfo[] EMPTY_OFFSET_INFO = new TermVectorOffsetInfo[0];
+               private int startOffset;
+               private int endOffset;
+               
+               public TermVectorOffsetInfo()
+               {
+               }
+               
+               public TermVectorOffsetInfo(int startOffset, int endOffset)
+               {
+                       this.endOffset = endOffset;
+                       this.startOffset = startOffset;
+               }
+               
+               /// <summary> The accessor for the ending offset for the term</summary>
+               /// <returns> The offset
+               /// </returns>
+               public virtual int GetEndOffset()
+               {
+                       return endOffset;
+               }
+               
+               public virtual void  SetEndOffset(int endOffset)
+               {
+                       this.endOffset = endOffset;
+               }
+               
+               /// <summary> The accessor for the starting offset of the term.
+               /// 
+               /// </summary>
+               /// <returns> The offset
+               /// </returns>
+               public virtual int GetStartOffset()
+               {
+                       return startOffset;
+               }
+               
+               public virtual void  SetStartOffset(int startOffset)
+               {
+                       this.startOffset = startOffset;
+               }
+               
+               /// <summary> Two TermVectorOffsetInfos are equal if both the start and end offsets are the same</summary>
+               /// <param name="o">The comparison Object
+               /// </param>
+               /// <returns> true if both {@link #GetStartOffset()} and {@link #GetEndOffset()} are the same for both objects.
+               /// </returns>
+               public  override bool Equals(System.Object o)
+               {
+                       if (this == o)
+                               return true;
+                       if (!(o is TermVectorOffsetInfo))
+                               return false;
+                       
+                       TermVectorOffsetInfo termVectorOffsetInfo = (TermVectorOffsetInfo) o;
+                       
+                       if (endOffset != termVectorOffsetInfo.endOffset)
+                               return false;
+                       if (startOffset != termVectorOffsetInfo.startOffset)
+                               return false;
+                       
+                       return true;
+               }
+               
+               public override int GetHashCode()
+               {
+                       int result;
+                       result = startOffset;
+                       result = 29 * result + endOffset;
+                       return result;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorsReader.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorsReader.cs
new file mode 100644 (file)
index 0000000..2976fe1
--- /dev/null
@@ -0,0 +1,721 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using BufferedIndexInput = Mono.Lucene.Net.Store.BufferedIndexInput;
+using Directory = Mono.Lucene.Net.Store.Directory;
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <version>  $Id: TermVectorsReader.java 687046 2008-08-19 13:01:11Z mikemccand $
+       /// </version>
+       public class TermVectorsReader : System.ICloneable
+       {
+               
+               // NOTE: if you make a new format, it must be larger than
+               // the current format
+               internal const int FORMAT_VERSION = 2;
+               
+               // Changes to speed up bulk merging of term vectors:
+               internal const int FORMAT_VERSION2 = 3;
+               
+               // Changed strings to UTF8 with length-in-bytes not length-in-chars
+               internal const int FORMAT_UTF8_LENGTH_IN_BYTES = 4;
+               
+               // NOTE: always change this if you switch to a new format!
+               internal static readonly int FORMAT_CURRENT = FORMAT_UTF8_LENGTH_IN_BYTES;
+               
+               //The size in bytes that the FORMAT_VERSION will take up at the beginning of each file 
+               internal const int FORMAT_SIZE = 4;
+               
+               internal const byte STORE_POSITIONS_WITH_TERMVECTOR = (byte) (0x1);
+               internal const byte STORE_OFFSET_WITH_TERMVECTOR = (byte) (0x2);
+               
+               private FieldInfos fieldInfos;
+               
+               private IndexInput tvx;
+               private IndexInput tvd;
+               private IndexInput tvf;
+               private int size;
+               private int numTotalDocs;
+               
+               // The docID offset where our docs begin in the index
+               // file.  This will be 0 if we have our own private file.
+               private int docStoreOffset;
+               
+               private int format;
+               
+               public /*internal*/ TermVectorsReader(Directory d, System.String segment, FieldInfos fieldInfos):this(d, segment, fieldInfos, BufferedIndexInput.BUFFER_SIZE)
+               {
+               }
+               
+               internal TermVectorsReader(Directory d, System.String segment, FieldInfos fieldInfos, int readBufferSize):this(d, segment, fieldInfos, readBufferSize, - 1, 0)
+               {
+               }
+               
+               internal TermVectorsReader(Directory d, System.String segment, FieldInfos fieldInfos, int readBufferSize, int docStoreOffset, int size)
+               {
+                       bool success = false;
+                       
+                       try
+                       {
+                if (d.FileExists(segment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION))
+                {
+                    tvx = d.OpenInput(segment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION, readBufferSize);
+                    format = CheckValidFormat(tvx);
+                    tvd = d.OpenInput(segment + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION, readBufferSize);
+                    int tvdFormat = CheckValidFormat(tvd);
+                    tvf = d.OpenInput(segment + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION, readBufferSize);
+                    int tvfFormat = CheckValidFormat(tvf);
+
+                    System.Diagnostics.Debug.Assert(format == tvdFormat);
+                    System.Diagnostics.Debug.Assert(format == tvfFormat);
+
+                    if (format >= FORMAT_VERSION2)
+                    {
+                        System.Diagnostics.Debug.Assert((tvx.Length() - FORMAT_SIZE) % 16 == 0);
+                        numTotalDocs = (int)(tvx.Length() >> 4);
+                    }
+                    else
+                    {
+                        System.Diagnostics.Debug.Assert((tvx.Length() - FORMAT_SIZE) % 8 == 0);
+                        numTotalDocs = (int)(tvx.Length() >> 3);
+                    }
+
+                    if (-1 == docStoreOffset)
+                    {
+                        this.docStoreOffset = 0;
+                        this.size = numTotalDocs;
+                        System.Diagnostics.Debug.Assert(size == 0 || numTotalDocs == size);
+                    }
+                    else
+                    {
+                        this.docStoreOffset = docStoreOffset;
+                        this.size = size;
+                        // Verify the file is long enough to hold all of our
+                        // docs
+                        System.Diagnostics.Debug.Assert(numTotalDocs >= size + docStoreOffset, "numTotalDocs=" + numTotalDocs + " size=" + size + " docStoreOffset=" + docStoreOffset);
+                    }
+                }
+                else
+                {
+                    // If all documents flushed in a segment had hit
+                    // non-aborting exceptions, it's possible that
+                    // FieldInfos.hasVectors returns true yet the term
+                    // vector files don't exist.
+                    format = 0;
+                }
+
+                               
+                               this.fieldInfos = fieldInfos;
+                               success = true;
+                       }
+                       finally
+                       {
+                               // With lock-less commits, it's entirely possible (and
+                               // fine) to hit a FileNotFound exception above. In
+                               // this case, we want to explicitly close any subset
+                               // of things that were opened so that we don't have to
+                               // wait for a GC to do so.
+                               if (!success)
+                               {
+                                       Close();
+                               }
+                       }
+               }
+               
+               // Used for bulk copy when merging
+               internal virtual IndexInput GetTvdStream()
+               {
+                       return tvd;
+               }
+               
+               // Used for bulk copy when merging
+               internal virtual IndexInput GetTvfStream()
+               {
+                       return tvf;
+               }
+               
+               private void  SeekTvx(int docNum)
+               {
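+                       // A tvx entry is 8 bytes (one pointer) before FORMAT_VERSION2
+                       // and 16 bytes (tvd + tvf pointers) from then on, after the
+                       // format header; e.g. doc 2 with docStoreOffset 0 in a V2
+                       // index seeks to 2 * 16 + FORMAT_SIZE = 36, assuming the
+                       // 4-byte FORMAT_SIZE that the sizing checks above imply.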
+                       if (format < FORMAT_VERSION2)
+                               tvx.Seek((docNum + docStoreOffset) * 8L + FORMAT_SIZE);
+                       else
+                               tvx.Seek((docNum + docStoreOffset) * 16L + FORMAT_SIZE);
+               }
+               
+               internal virtual bool CanReadRawDocs()
+               {
+                       return format >= FORMAT_UTF8_LENGTH_IN_BYTES;
+               }
+               
+               /// <summary>Retrieve the length (in bytes) of the tvd and tvf
+               /// entries for the next numDocs starting with
+               /// startDocID.  This is used for bulk copying when
+               /// merging segments, if the field numbers are
+               /// congruent.  Once this returns, the tvf &amp; tvd streams
+               /// are positioned at the startDocID.
+               /// </summary>
+               internal void  RawDocs(int[] tvdLengths, int[] tvfLengths, int startDocID, int numDocs)
+               {
+                       
+                       if (tvx == null)
+                       {
+                for (int i = 0; i < tvdLengths.Length; i++)
+                {
+                    tvdLengths[i] = 0;
+                }
+                for (int i = 0; i < tvfLengths.Length; i++)
+                {
+                    tvfLengths[i] = 0;
+                }
+                               return ;
+                       }
+                       
+                       // SegmentMerger calls canReadRawDocs() first and should
+                       // not call us if that returns false.
+                       if (format < FORMAT_VERSION2)
+                               throw new System.SystemException("cannot read raw docs with older term vector formats");
+                       
+                       SeekTvx(startDocID);
+                       
+                       long tvdPosition = tvx.ReadLong();
+                       tvd.Seek(tvdPosition);
+                       
+                       long tvfPosition = tvx.ReadLong();
+                       tvf.Seek(tvfPosition);
+                       
+                       long lastTvdPosition = tvdPosition;
+                       long lastTvfPosition = tvfPosition;
+                       
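+                       // Each length is the difference of consecutive tvx pointers;
+                       // e.g. (illustrative values) tvd pointers 100, 240, 400 for
+                       // three docs, with tvd.Length() = 520 closing the last one,
+                       // yield tvdLengths of 140, 160 and 120.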
+                       int count = 0;
+                       while (count < numDocs)
+                       {
+                               int docID = docStoreOffset + startDocID + count + 1;
+                               System.Diagnostics.Debug.Assert(docID <= numTotalDocs);
+                               if (docID < numTotalDocs)
+                               {
+                                       tvdPosition = tvx.ReadLong();
+                                       tvfPosition = tvx.ReadLong();
+                               }
+                               else
+                               {
+                                       tvdPosition = tvd.Length();
+                                       tvfPosition = tvf.Length();
+                                       System.Diagnostics.Debug.Assert(count == numDocs - 1);
+                               }
+                               tvdLengths[count] = (int) (tvdPosition - lastTvdPosition);
+                               tvfLengths[count] = (int) (tvfPosition - lastTvfPosition);
+                               count++;
+                               lastTvdPosition = tvdPosition;
+                               lastTvfPosition = tvfPosition;
+                       }
+               }
+               
+               private int CheckValidFormat(IndexInput in_Renamed)
+               {
+                       int format = in_Renamed.ReadInt();
+                       if (format > FORMAT_CURRENT)
+                       {
+                               throw new CorruptIndexException("Incompatible format version: " + format + " expected " + FORMAT_CURRENT + " or less");
+                       }
+                       return format;
+               }
+               
+               internal virtual void  Close()
+               {
+                       // Make every effort to close everything. Keep the first
+                       // exception and rethrow it, wrapped, once all three files
+                       // have been closed.
+                       System.IO.IOException keep = null;
+                       if (tvx != null)
+                               try
+                               {
+                                       tvx.Close();
+                               }
+                               catch (System.IO.IOException e)
+                               {
+                                       if (keep == null)
+                                               keep = e;
+                               }
+                       if (tvd != null)
+                               try
+                               {
+                                       tvd.Close();
+                               }
+                               catch (System.IO.IOException e)
+                               {
+                                       if (keep == null)
+                                               keep = e;
+                               }
+                       if (tvf != null)
+                               try
+                               {
+                                       tvf.Close();
+                               }
+                               catch (System.IO.IOException e)
+                               {
+                                       if (keep == null)
+                                               keep = e;
+                               }
+                       if (keep != null)
+                       {
+                               throw new System.IO.IOException(keep.Message, keep);
+                       }
+               }
+               
+               /// <summary>The number of documents in the reader.</summary>
+               /// <returns> The number of documents in the reader
+               /// </returns>
+               internal virtual int Size()
+               {
+                       return size;
+               }
+               
+               public virtual void  Get(int docNum, System.String field, TermVectorMapper mapper)
+               {
+                       if (tvx != null)
+                       {
+                               int fieldNumber = fieldInfos.FieldNumber(field);
+                               // We need to account for the FORMAT_SIZE when seeking in
+                               // the tvx. We don't need to do this in other seeks because
+                               // we already have the file pointer that was written in
+                               // another file.
+                               SeekTvx(docNum);
+                               //System.out.println("TVX Pointer: " + tvx.getFilePointer());
+                               long tvdPosition = tvx.ReadLong();
+                               
+                               tvd.Seek(tvdPosition);
+                               int fieldCount = tvd.ReadVInt();
+                               //System.out.println("Num Fields: " + fieldCount);
+                               // There are only a few fields per document. We opt for a full scan
+                               // rather than requiring that they be ordered. We need to read through
+                               // all of the fields anyway to get to the tvf pointers.
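+                               // Field numbers are delta-encoded before FORMAT_VERSION:
+                               // e.g. stored VInts 3, 2, 4 decode to fields 3, 5, 9;
+                               // newer formats store the absolute numbers directly.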
+                               int number = 0;
+                               int found = - 1;
+                               for (int i = 0; i < fieldCount; i++)
+                               {
+                                       if (format >= FORMAT_VERSION)
+                                               number = tvd.ReadVInt();
+                                       else
+                                               number += tvd.ReadVInt();
+                                       
+                                       if (number == fieldNumber)
+                                               found = i;
+                               }
+                               
+                               // This field, although valid in the segment, may not have
+                               // been found in this document; only read the vector if it was.
+                               if (found != - 1)
+                               {
+                                       // Compute position in the tvf file
+                                       long position;
+                                       if (format >= FORMAT_VERSION2)
+                                               position = tvx.ReadLong();
+                                       else
+                                               position = tvd.ReadVLong();
+                                       for (int i = 1; i <= found; i++)
+                                               position += tvd.ReadVLong();
+                                       
+                                       mapper.SetDocumentNumber(docNum);
+                                       ReadTermVector(field, position, mapper);
+                               }
+                               else
+                               {
+                                       //System.out.println("Fieldable not found");
+                               }
+                       }
+                       else
+                       {
+                               //System.out.println("No tvx file");
+                       }
+               }
+               
+               
+               
+               /// <summary> Retrieve the term vector for the given document and field</summary>
+               /// <param name="docNum">The document number to retrieve the vector for
+               /// </param>
+               /// <param name="field">The field within the document to retrieve
+               /// </param>
+               /// <returns> The TermFreqVector for the document and field or null if there is no termVector for this field.
+               /// </returns>
+               /// <throws>  IOException if there is an error reading the term vector files </throws>
+               public /*internal*/ virtual TermFreqVector Get(int docNum, System.String field)
+               {
+                       // Check if no term vectors are available for this segment at all
+                       ParallelArrayTermVectorMapper mapper = new ParallelArrayTermVectorMapper();
+                       Get(docNum, field, mapper);
+                       
+                       return mapper.MaterializeVector();
+               }
+               
+               // Reads the String[] fields; you have to pre-seek tvd to
+               // the right point
+               private System.String[] ReadFields(int fieldCount)
+               {
+                       int number = 0;
+                       System.String[] fields = new System.String[fieldCount];
+                       
+                       for (int i = 0; i < fieldCount; i++)
+                       {
+                               if (format >= FORMAT_VERSION)
+                                       number = tvd.ReadVInt();
+                               else
+                                       number += tvd.ReadVInt();
+                               
+                               fields[i] = fieldInfos.FieldName(number);
+                       }
+                       
+                       return fields;
+               }
+               
+               // Reads the long[] offsets into TVF; you have to pre-seek
+               // tvx/tvd to the right point
+               private long[] ReadTvfPointers(int fieldCount)
+               {
+                       // Compute position in the tvf file
+                       long position;
+                       if (format >= FORMAT_VERSION2)
+                               position = tvx.ReadLong();
+                       else
+                               position = tvd.ReadVLong();
+                       
+                       long[] tvfPointers = new long[fieldCount];
+                       tvfPointers[0] = position;
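+                       // The remaining pointers are stored as VLong deltas; e.g.
+                       // (illustrative values) an initial pointer of 1000 followed
+                       // by deltas 50 and 30 yields tvf pointers 1000, 1050, 1080.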
+                       
+                       for (int i = 1; i < fieldCount; i++)
+                       {
+                               position += tvd.ReadVLong();
+                               tvfPointers[i] = position;
+                       }
+                       
+                       return tvfPointers;
+               }
+               
+               /// <summary> Return all term vectors stored for this document or null if they could not be read in.
+               /// 
+               /// </summary>
+               /// <param name="docNum">The document number to retrieve the vector for
+               /// </param>
+               /// <returns> All term frequency vectors
+               /// </returns>
+               /// <throws>  IOException if there is an error reading the term vector files  </throws>
+               public /*internal*/ virtual TermFreqVector[] Get(int docNum)
+               {
+                       TermFreqVector[] result = null;
+                       if (tvx != null)
+                       {
+                               //We need to offset by the FORMAT_SIZE; SeekTvx takes care of that
+                               SeekTvx(docNum);
+                               long tvdPosition = tvx.ReadLong();
+                               
+                               tvd.Seek(tvdPosition);
+                               int fieldCount = tvd.ReadVInt();
+                               
+                               // No fields are vectorized for this document
+                               if (fieldCount != 0)
+                               {
+                                       System.String[] fields = ReadFields(fieldCount);
+                                       long[] tvfPointers = ReadTvfPointers(fieldCount);
+                                       result = ReadTermVectors(docNum, fields, tvfPointers);
+                               }
+                       }
+                       else
+                       {
+                               //System.out.println("No tvx file");
+                       }
+                       return result;
+               }
+               
+               public virtual void  Get(int docNumber, TermVectorMapper mapper)
+               {
+                       // Check if no term vectors are available for this segment at all
+                       if (tvx != null)
+                       {
+                               //We need to offset by the FORMAT_SIZE; SeekTvx takes care of that
+                               
+                               SeekTvx(docNumber);
+                               long tvdPosition = tvx.ReadLong();
+                               
+                               tvd.Seek(tvdPosition);
+                               int fieldCount = tvd.ReadVInt();
+                               
+                               // No fields are vectorized for this document
+                               if (fieldCount != 0)
+                               {
+                                       System.String[] fields = ReadFields(fieldCount);
+                                       long[] tvfPointers = ReadTvfPointers(fieldCount);
+                                       mapper.SetDocumentNumber(docNumber);
+                                       ReadTermVectors(fields, tvfPointers, mapper);
+                               }
+                       }
+                       else
+                       {
+                               //System.out.println("No tvx file");
+                       }
+               }
+               
+               
+               private SegmentTermVector[] ReadTermVectors(int docNum, System.String[] fields, long[] tvfPointers)
+               {
+                       SegmentTermVector[] res = new SegmentTermVector[fields.Length];
+                       for (int i = 0; i < fields.Length; i++)
+                       {
+                               ParallelArrayTermVectorMapper mapper = new ParallelArrayTermVectorMapper();
+                               mapper.SetDocumentNumber(docNum);
+                               ReadTermVector(fields[i], tvfPointers[i], mapper);
+                               res[i] = (SegmentTermVector) mapper.MaterializeVector();
+                       }
+                       return res;
+               }
+               
+               private void  ReadTermVectors(System.String[] fields, long[] tvfPointers, TermVectorMapper mapper)
+               {
+                       for (int i = 0; i < fields.Length; i++)
+                       {
+                               ReadTermVector(fields[i], tvfPointers[i], mapper);
+                       }
+               }
+               
+               
+               /// <summary>Reads one field's term vector from the tvf stream and feeds it to the mapper.</summary>
+               /// <param name="field">The field to read in
+               /// </param>
+               /// <param name="tvfPointer">The pointer within the tvf file where we should start reading
+               /// </param>
+               /// <param name="mapper">The mapper used to map the TermVector
+               /// </param>
+               /// <throws>  IOException </throws>
+               private void  ReadTermVector(System.String field, long tvfPointer, TermVectorMapper mapper)
+               {
+                       
+                       // Now read the data from specified position
+                       //We don't need to offset by the FORMAT here since the pointer already includes the offset
+                       tvf.Seek(tvfPointer);
+                       
+                       int numTerms = tvf.ReadVInt();
+                       //System.out.println("Num Terms: " + numTerms);
+                       // If there are no terms, return without mapping anything. However, this should never occur!
+                       if (numTerms == 0)
+                               return ;
+                       
+                       bool storePositions;
+                       bool storeOffsets;
+                       
+                       if (format >= FORMAT_VERSION)
+                       {
+                               byte bits = tvf.ReadByte();
+                               storePositions = (bits & STORE_POSITIONS_WITH_TERMVECTOR) != 0;
+                               storeOffsets = (bits & STORE_OFFSET_WITH_TERMVECTOR) != 0;
+                       }
+                       else
+                       {
+                               tvf.ReadVInt();
+                               storePositions = false;
+                               storeOffsets = false;
+                       }
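+                       // The two store flags occupy separate bits of the same byte,
+                       // so both may be set at once; the writer side ORs them
+                       // together the same way before writing the byte.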
+                       mapper.SetExpectations(field, numTerms, storeOffsets, storePositions);
+                       int start = 0;
+                       int deltaLength = 0;
+                       int totalLength = 0;
+                       byte[] byteBuffer;
+                       char[] charBuffer;
+                       bool preUTF8 = format < FORMAT_UTF8_LENGTH_IN_BYTES;
+                       
+                       // init the buffers
+                       if (preUTF8)
+                       {
+                               charBuffer = new char[10];
+                               byteBuffer = null;
+                       }
+                       else
+                       {
+                               charBuffer = null;
+                               byteBuffer = new byte[20];
+                       }
+                       
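+                       // Terms are prefix-compressed against the previous term:
+                       // start is the shared prefix length, deltaLength the suffix
+                       // length; e.g. after reading "app", start=3 with suffix "le"
+                       // reconstructs "apple" (illustrative values).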
+                       for (int i = 0; i < numTerms; i++)
+                       {
+                               start = tvf.ReadVInt();
+                               deltaLength = tvf.ReadVInt();
+                               totalLength = start + deltaLength;
+                               
+                               System.String term;
+                               
+                               if (preUTF8)
+                               {
+                                       // Term stored as java chars
+                                       if (charBuffer.Length < totalLength)
+                                       {
+                                               char[] newCharBuffer = new char[(int) (1.5 * totalLength)];
+                                               Array.Copy(charBuffer, 0, newCharBuffer, 0, start);
+                                               charBuffer = newCharBuffer;
+                                       }
+                                       tvf.ReadChars(charBuffer, start, deltaLength);
+                                       term = new System.String(charBuffer, 0, totalLength);
+                               }
+                               else
+                               {
+                                       // Term stored as utf8 bytes
+                                       if (byteBuffer.Length < totalLength)
+                                       {
+                                               byte[] newByteBuffer = new byte[(int) (1.5 * totalLength)];
+                                               Array.Copy(byteBuffer, 0, newByteBuffer, 0, start);
+                                               byteBuffer = newByteBuffer;
+                                       }
+                                       tvf.ReadBytes(byteBuffer, start, deltaLength);
+                    term = System.Text.Encoding.UTF8.GetString(byteBuffer, 0, totalLength);
+                               }
+                               int freq = tvf.ReadVInt();
+                               int[] positions = null;
+                               if (storePositions)
+                               {
+                                       //read in the positions
+                                       //does the mapper even care about positions?
+                                       if (mapper.IsIgnoringPositions() == false)
+                                       {
+                                               positions = new int[freq];
+                                               int prevPosition = 0;
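+                                               // Positions are delta-encoded VInts; e.g. stored
+                                               // deltas 2, 5, 1 decode to positions 2, 7, 8
+                                               // (illustrative values).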
+                                               for (int j = 0; j < freq; j++)
+                                               {
+                                                       positions[j] = prevPosition + tvf.ReadVInt();
+                                                       prevPosition = positions[j];
+                                               }
+                                       }
+                                       else
+                                       {
+                                               // We need to skip over the positions. Since these are
+                                               // VInts, there is no way to know how far to skip without
+                                               // reading them one by one.
+                                               for (int j = 0; j < freq; j++)
+                                               {
+                                                       tvf.ReadVInt();
+                                               }
+                                       }
+                               }
+                               TermVectorOffsetInfo[] offsets = null;
+                               if (storeOffsets)
+                               {
+                                       //does the mapper even care about offsets?
+                                       if (mapper.IsIgnoringOffsets() == false)
+                                       {
+                                               offsets = new TermVectorOffsetInfo[freq];
+                                               int prevOffset = 0;
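+                                               // Offsets are likewise delta-encoded: each VInt pair
+                                               // is (startOffset - previous endOffset,
+                                               // endOffset - startOffset).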
+                                               for (int j = 0; j < freq; j++)
+                                               {
+                                                       int startOffset = prevOffset + tvf.ReadVInt();
+                                                       int endOffset = startOffset + tvf.ReadVInt();
+                                                       offsets[j] = new TermVectorOffsetInfo(startOffset, endOffset);
+                                                       prevOffset = endOffset;
+                                               }
+                                       }
+                                       else
+                                       {
+                                               for (int j = 0; j < freq; j++)
+                                               {
+                                                       tvf.ReadVInt();
+                                                       tvf.ReadVInt();
+                                               }
+                                       }
+                               }
+                               mapper.Map(term, freq, offsets, positions);
+                       }
+               }
+               
+               public virtual System.Object Clone()
+               {
+                       
+                       TermVectorsReader clone = (TermVectorsReader) base.MemberwiseClone();
+                       
+                       // These are null when a TermVectorsReader was created
+                       // on a segment that did not have term vectors saved
+                       if (tvx != null && tvd != null && tvf != null)
+                       {
+                               clone.tvx = (IndexInput) tvx.Clone();
+                               clone.tvd = (IndexInput) tvd.Clone();
+                               clone.tvf = (IndexInput) tvf.Clone();
+                       }
+                       
+                       return clone;
+               }
+       }
+       
+       
+       /// <summary> Models the existing parallel array structure</summary>
+       class ParallelArrayTermVectorMapper:TermVectorMapper
+       {
+               
+               private System.String[] terms;
+               private int[] termFreqs;
+               private int[][] positions;
+               private TermVectorOffsetInfo[][] offsets;
+               private int currentPosition;
+               private bool storingOffsets;
+               private bool storingPositions;
+               private System.String field;
+               
+               public override void  SetExpectations(System.String field, int numTerms, bool storeOffsets, bool storePositions)
+               {
+                       this.field = field;
+                       terms = new System.String[numTerms];
+                       termFreqs = new int[numTerms];
+                       this.storingOffsets = storeOffsets;
+                       this.storingPositions = storePositions;
+                       if (storePositions)
+                               this.positions = new int[numTerms][];
+                       if (storeOffsets)
+                               this.offsets = new TermVectorOffsetInfo[numTerms][];
+               }
+               
+               public override void  Map(System.String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions)
+               {
+                       terms[currentPosition] = term;
+                       termFreqs[currentPosition] = frequency;
+                       if (storingOffsets)
+                       {
+                               this.offsets[currentPosition] = offsets;
+                       }
+                       if (storingPositions)
+                       {
+                               this.positions[currentPosition] = positions;
+                       }
+                       currentPosition++;
+               }
+               
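+               // Typical use, as in TermVectorsReader.Get above: create the
+               // mapper, pass it through Get(docNum, field, mapper), then call
+               // MaterializeVector() to obtain the resulting TermFreqVector.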
+               /// <summary> Construct the vector</summary>
+               /// <returns> The <see cref="TermFreqVector" /> based on the mappings.
+               /// </returns>
+               public virtual TermFreqVector MaterializeVector()
+               {
+                       SegmentTermVector tv = null;
+                       if (field != null && terms != null)
+                       {
+                               if (storingPositions || storingOffsets)
+                               {
+                                       tv = new SegmentTermPositionVector(field, terms, termFreqs, positions, offsets);
+                               }
+                               else
+                               {
+                                       tv = new SegmentTermVector(field, terms, termFreqs);
+                               }
+                       }
+                       return tv;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorsTermsWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorsTermsWriter.cs
new file mode 100644 (file)
index 0000000..d1b364a
--- /dev/null
@@ -0,0 +1,384 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+using RAMOutputStream = Mono.Lucene.Net.Store.RAMOutputStream;
+using ArrayUtil = Mono.Lucene.Net.Util.ArrayUtil;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class TermVectorsTermsWriter:TermsHashConsumer
+       {
+               private void  InitBlock()
+               {
+                       docFreeList = new PerDoc[1];
+               }
+               
+               internal DocumentsWriter docWriter;
+               internal TermVectorsWriter termVectorsWriter;
+               internal PerDoc[] docFreeList;
+               internal int freeCount;
+               internal IndexOutput tvx;
+               internal IndexOutput tvd;
+               internal IndexOutput tvf;
+               internal int lastDocID;
+               
+               public TermVectorsTermsWriter(DocumentsWriter docWriter)
+               {
+                       InitBlock();
+                       this.docWriter = docWriter;
+               }
+               
+               public override TermsHashConsumerPerThread AddThread(TermsHashPerThread termsHashPerThread)
+               {
+                       return new TermVectorsTermsWriterPerThread(termsHashPerThread, this);
+               }
+               
+               internal override void  CreatePostings(RawPostingList[] postings, int start, int count)
+               {
+                       int end = start + count;
+                       for (int i = start; i < end; i++)
+                               postings[i] = new PostingList();
+               }
+               
+               public override void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
+               {
+                       lock (this)
+                       {
+                // NOTE: it's possible that all documents seen in this segment
+                // hit non-aborting exceptions, in which case we will
+                // not have yet init'd the TermVectorsWriter.  This is
+                // actually OK (unlike in the stored fields case)
+                // because, although FieldInfos.hasVectors() will return
+                // true, the TermVectorsReader gracefully handles
+                // non-existence of the term vectors files.
+                               if (tvx != null)
+                               {
+                                       
+                                       // In case there are some final documents that we
+                                       // didn't see (because they hit a non-aborting exception):
+                                       if (state.numDocsInStore > 0)
+                                               Fill(state.numDocsInStore - docWriter.GetDocStoreOffset());
+                                       
+                                       tvx.Flush();
+                                       tvd.Flush();
+                                       tvf.Flush();
+                               }
+
+                System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
+                               while (it.MoveNext())
+                               {
+                                       System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
+                                       System.Collections.IEnumerator it2 = ((System.Collections.ICollection) entry.Value).GetEnumerator();
+                                       while (it2.MoveNext())
+                                       {
+                                               TermVectorsTermsWriterPerField perField = (TermVectorsTermsWriterPerField) ((System.Collections.DictionaryEntry) it2.Current).Key;
+                                               perField.termsHashPerField.Reset();
+                                               perField.ShrinkHash();
+                                       }
+                                       
+                                       TermVectorsTermsWriterPerThread perThread = (TermVectorsTermsWriterPerThread) entry.Key;
+                                       perThread.termsHashPerThread.Reset(true);
+                               }
+                       }
+               }
+               
+               internal override void  CloseDocStore(SegmentWriteState state)
+               {
+                       lock (this)
+                       {
+                               if (tvx != null)
+                               {
+                                       // At least one doc in this run had term vectors
+                                       // enabled
+                                       Fill(state.numDocsInStore - docWriter.GetDocStoreOffset());
+                                       tvx.Close();
+                                       tvf.Close();
+                                       tvd.Close();
+                                       tvx = null;
+                                       System.Diagnostics.Debug.Assert(state.docStoreSegmentName != null);
+                                       System.String fileName = state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION;
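+                                       // tvx must be exactly the 4-byte format header plus
+                                       // 16 bytes (two longs) per stored doc; e.g. 10 docs
+                                       // imply a 164-byte file.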
+                                       if (4 + ((long) state.numDocsInStore) * 16 != state.directory.FileLength(fileName))
+                                               throw new System.SystemException("after flush: tvx size mismatch: " + state.numDocsInStore + " docs vs " + state.directory.FileLength(fileName) + " length in bytes of " + fileName + " file exists?=" + state.directory.FileExists(fileName));
+                                       
+                                       SupportClass.CollectionsHelper.AddIfNotContains(state.flushedFiles, state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
+                    SupportClass.CollectionsHelper.AddIfNotContains(state.flushedFiles, state.docStoreSegmentName + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
+                                       SupportClass.CollectionsHelper.AddIfNotContains(state.flushedFiles, state.docStoreSegmentName + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
+                                       
+                                       docWriter.RemoveOpenFile(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
+                                       docWriter.RemoveOpenFile(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
+                                       docWriter.RemoveOpenFile(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
+                                       
+                                       lastDocID = 0;
+                               }
+                       }
+               }
+               
+               internal int allocCount;
+               
+               internal PerDoc GetPerDoc()
+               {
+                       lock (this)
+                       {
+                               if (freeCount == 0)
+                               {
+                                       allocCount++;
+                                       if (allocCount > docFreeList.Length)
+                                       {
+                                               // Grow our free list up front to make sure we have
+                                               // enough space to recycle all outstanding PerDoc
+                                               // instances
+                                               System.Diagnostics.Debug.Assert(allocCount == 1 + docFreeList.Length);
+                                               docFreeList = new PerDoc[ArrayUtil.GetNextSize(allocCount)];
+                                       }
+                                       return new PerDoc(this);
+                               }
+                               else
+                                       return docFreeList[--freeCount];
+                       }
+               }
+               
+               /// <summary>Fills in no-term-vectors for all docs we haven't seen
+               /// since the last doc that had term vectors. 
+               /// </summary>
+               internal void  Fill(int docID)
+               {
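+                       // Each skipped doc gets an "empty" entry: two tvx longs
+                       // recording the current tvd/tvf positions plus a zero field
+                       // count in tvd, so readers see a valid, vector-less doc.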
+                       int docStoreOffset = docWriter.GetDocStoreOffset();
+                       int end = docID + docStoreOffset;
+                       if (lastDocID < end)
+                       {
+                               long tvfPosition = tvf.GetFilePointer();
+                               while (lastDocID < end)
+                               {
+                                       tvx.WriteLong(tvd.GetFilePointer());
+                                       tvd.WriteVInt(0);
+                                       tvx.WriteLong(tvfPosition);
+                                       lastDocID++;
+                               }
+                       }
+               }
+               
+               internal void  InitTermVectorsWriter()
+               {
+                       lock (this)
+                       {
+                               if (tvx == null)
+                               {
+                                       
+                                       System.String docStoreSegment = docWriter.GetDocStoreSegment();
+                                       
+                                       if (docStoreSegment == null)
+                                               return ;
+                                       
+                                       System.Diagnostics.Debug.Assert(docStoreSegment != null);
+                                       
+                                       // If we hit an exception while init'ing the term
+                                       // vector output files, we must abort this segment
+                                       // because those files will be in an unknown
+                                       // state:
+                                       tvx = docWriter.directory.CreateOutput(docStoreSegment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
+                                       tvd = docWriter.directory.CreateOutput(docStoreSegment + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
+                                       tvf = docWriter.directory.CreateOutput(docStoreSegment + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
+                                       
+                                       tvx.WriteInt(TermVectorsReader.FORMAT_CURRENT);
+                                       tvd.WriteInt(TermVectorsReader.FORMAT_CURRENT);
+                                       tvf.WriteInt(TermVectorsReader.FORMAT_CURRENT);
+                                       
+                                       docWriter.AddOpenFile(docStoreSegment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
+                                       docWriter.AddOpenFile(docStoreSegment + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
+                                       docWriter.AddOpenFile(docStoreSegment + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
+                                       
+                                       lastDocID = 0;
+                               }
+                       }
+               }
+               
+               internal void  FinishDocument(PerDoc perDoc)
+               {
+                       lock (this)
+                       {
+                               
+                               System.Diagnostics.Debug.Assert(docWriter.writer.TestPoint("TermVectorsTermsWriter.finishDocument start"));
+                               
+                               InitTermVectorsWriter();
+                               
+                               Fill(perDoc.docID);
+                               
+                               // Append term vectors to the real outputs:
+                               tvx.WriteLong(tvd.GetFilePointer());
+                               tvx.WriteLong(tvf.GetFilePointer());
+                               tvd.WriteVInt(perDoc.numVectorFields);
+                               if (perDoc.numVectorFields > 0)
+                               {
+                                       for (int i = 0; i < perDoc.numVectorFields; i++)
+                                               tvd.WriteVInt(perDoc.fieldNumbers[i]);
+                                       System.Diagnostics.Debug.Assert(0 == perDoc.fieldPointers [0]);
+                                       long lastPos = perDoc.fieldPointers[0];
+                                       for (int i = 1; i < perDoc.numVectorFields; i++)
+                                       {
+                                               long pos = perDoc.fieldPointers[i];
+                                               tvd.WriteVLong(pos - lastPos);
+                                               lastPos = pos;
+                                       }
+                    perDoc.perDocTvf.WriteTo(tvf);
+                                       perDoc.numVectorFields = 0;
+                               }
+                               
+                               System.Diagnostics.Debug.Assert(lastDocID == perDoc.docID + docWriter.GetDocStoreOffset());
+                               
+                               lastDocID++;
+                perDoc.Reset();
+                               Free(perDoc);
+                               System.Diagnostics.Debug.Assert(docWriter.writer.TestPoint("TermVectorsTermsWriter.finishDocument end"));
+                       }
+               }
+               
+               public bool FreeRAM()
+               {
+                       // We don't hold any state beyond one doc, so we don't
+                       // free persistent RAM here
+                       return false;
+               }
+               
+               public override void  Abort()
+               {
+                       if (tvx != null)
+                       {
+                               try
+                               {
+                                       tvx.Close();
+                               }
+                               catch (System.Exception)
+                               {
+                                       // Ignore: best-effort close while aborting.
+                               }
+                               tvx = null;
+                       }
+                       if (tvd != null)
+                       {
+                               try
+                               {
+                                       tvd.Close();
+                               }
+                               catch (System.Exception)
+                               {
+                                       // Ignore: best-effort close while aborting.
+                               }
+                               tvd = null;
+                       }
+                       if (tvf != null)
+                       {
+                               try
+                               {
+                                       tvf.Close();
+                               }
+                               catch (System.Exception)
+                               {
+                                       // Ignore: best-effort close while aborting.
+                               }
+                               tvf = null;
+                       }
+                       lastDocID = 0;
+               }
+               
+               internal void  Free(PerDoc doc)
+               {
+                       lock (this)
+                       {
+                               System.Diagnostics.Debug.Assert(freeCount < docFreeList.Length);
+                               docFreeList[freeCount++] = doc;
+                       }
+               }
+               
+               internal class PerDoc:DocumentsWriter.DocWriter
+               {
+                       public PerDoc(TermVectorsTermsWriter enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(TermVectorsTermsWriter enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                buffer = enclosingInstance.docWriter.NewPerDocBuffer();
+                perDocTvf = new RAMOutputStream(buffer);
+                       }
+                       private TermVectorsTermsWriter enclosingInstance;
+                       public TermVectorsTermsWriter Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       
+                       internal DocumentsWriter.PerDocBuffer buffer;
+            internal RAMOutputStream perDocTvf;
+                       internal int numVectorFields;
+                       
+                       internal int[] fieldNumbers = new int[1];
+                       internal long[] fieldPointers = new long[1];
+                       
+                       internal void  Reset()
+                       {
+                perDocTvf.Reset();
+                buffer.Recycle();
+                               numVectorFields = 0;
+                       }
+                       
+                       public override void  Abort()
+                       {
+                               Reset();
+                               Enclosing_Instance.Free(this);
+                       }
+                       
+                       internal void  AddField(int fieldNumber)
+                       {
+                               if (numVectorFields == fieldNumbers.Length)
+                               {
+                                       fieldNumbers = ArrayUtil.Grow(fieldNumbers);
+                                       fieldPointers = ArrayUtil.Grow(fieldPointers);
+                               }
+                               fieldNumbers[numVectorFields] = fieldNumber;
+                fieldPointers[numVectorFields] = perDocTvf.GetFilePointer();
+                               numVectorFields++;
+                       }
+                       
+                       public override long SizeInBytes()
+                       {
+                return buffer.GetSizeInBytes();
+                       }
+                       
+                       public override void  Finish()
+                       {
+                               Enclosing_Instance.FinishDocument(this);
+                       }
+               }
+               
+               internal sealed class PostingList:RawPostingList
+               {
+                       internal int freq; // How many times this term occurred in the current doc
+                       internal int lastOffset; // Last offset we saw
+                       internal int lastPosition; // Last position where this term occurred
+               }
+               
+               internal override int BytesPerPosting()
+               {
+                       return RawPostingList.BYTES_SIZE + 3 * DocumentsWriter.INT_NUM_BYTE;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorsTermsWriterPerField.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorsTermsWriterPerField.cs
new file mode 100644 (file)
index 0000000..1aeda5a
--- /dev/null
@@ -0,0 +1,291 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using OffsetAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.OffsetAttribute;
+using Fieldable = Mono.Lucene.Net.Documents.Fieldable;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+using UnicodeUtil = Mono.Lucene.Net.Util.UnicodeUtil;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class TermVectorsTermsWriterPerField:TermsHashConsumerPerField
+       {
+               
+               internal TermVectorsTermsWriterPerThread perThread;
+               internal TermsHashPerField termsHashPerField;
+               internal TermVectorsTermsWriter termsWriter;
+               internal FieldInfo fieldInfo;
+               internal DocumentsWriter.DocState docState;
+               internal FieldInvertState fieldState;
+               
+               internal bool doVectors;
+               internal bool doVectorPositions;
+               internal bool doVectorOffsets;
+               
+               internal int maxNumPostings;
+               internal OffsetAttribute offsetAttribute = null;
+               
+               public TermVectorsTermsWriterPerField(TermsHashPerField termsHashPerField, TermVectorsTermsWriterPerThread perThread, FieldInfo fieldInfo)
+               {
+                       this.termsHashPerField = termsHashPerField;
+                       this.perThread = perThread;
+                       this.termsWriter = perThread.termsWriter;
+                       this.fieldInfo = fieldInfo;
+                       docState = termsHashPerField.docState;
+                       fieldState = termsHashPerField.fieldState;
+               }
+               
+               internal override int GetStreamCount()
+               {
+                       return 2;
+               }
+               
+               internal override bool Start(Fieldable[] fields, int count)
+               {
+                       doVectors = false;
+                       doVectorPositions = false;
+                       doVectorOffsets = false;
+                       
+                       for (int i = 0; i < count; i++)
+                       {
+                               Fieldable field = fields[i];
+                               if (field.IsIndexed() && field.IsTermVectorStored())
+                               {
+                                       doVectors = true;
+                                       doVectorPositions |= field.IsStorePositionWithTermVector();
+                                       doVectorOffsets |= field.IsStoreOffsetWithTermVector();
+                               }
+                       }
+                       
+                       if (doVectors)
+                       {
+                               if (perThread.doc == null)
+                               {
+                                       perThread.doc = termsWriter.GetPerDoc();
+                                       perThread.doc.docID = docState.docID;
+                                       System.Diagnostics.Debug.Assert(perThread.doc.numVectorFields == 0);
+                                       System.Diagnostics.Debug.Assert(0 == perThread.doc.perDocTvf.Length());
+                                       System.Diagnostics.Debug.Assert(0 == perThread.doc.perDocTvf.GetFilePointer());
+                               }
+
+                System.Diagnostics.Debug.Assert(perThread.doc.docID == docState.docID);
+                if (termsHashPerField.numPostings != 0)
+                {
+                    // Only necessary if previous doc hit a
+                    // non-aborting exception while writing vectors in
+                    // this field:
+                    termsHashPerField.Reset();
+                    perThread.termsHashPerThread.Reset(false);
+                }
+                       }
+                       
+                       // TODO: only if needed for performance
+                       //perThread.postingsCount = 0;
+                       
+                       return doVectors;
+               }
+               
+               public void  Abort()
+               {
+               }
+               
+               /// <summary>Called once per field per document if term vectors
+               /// are enabled, to write the vectors to
+               /// RAMOutputStream, which is then quickly flushed to
+               /// the real term vectors files in the Directory. 
+               /// </summary>
+               internal override void  Finish()
+               {
+                       
+                       System.Diagnostics.Debug.Assert(docState.TestPoint("TermVectorsTermsWriterPerField.finish start"));
+                       
+                       int numPostings = termsHashPerField.numPostings;
+                       
+                       System.Diagnostics.Debug.Assert(numPostings >= 0);
+                       
+                       if (!doVectors || numPostings == 0)
+                               return ;
+                       
+                       if (numPostings > maxNumPostings)
+                               maxNumPostings = numPostings;
+                       
+                       IndexOutput tvf = perThread.doc.perDocTvf;
+                       
+                       // This is called once, after inverting all occurrences
+                       // of a given field in the doc.  At this point we flush
+                       // our hash into the DocWriter.
+                       
+                       System.Diagnostics.Debug.Assert(fieldInfo.storeTermVector);
+                       System.Diagnostics.Debug.Assert(perThread.VectorFieldsInOrder(fieldInfo));
+                       
+                       perThread.doc.AddField(termsHashPerField.fieldInfo.number);
+                       
+                       RawPostingList[] postings = termsHashPerField.SortPostings();
+                       
+                       tvf.WriteVInt(numPostings);
+                       byte bits = (byte) (0x0);
+                       if (doVectorPositions)
+                               bits |= TermVectorsReader.STORE_POSITIONS_WITH_TERMVECTOR;
+                       if (doVectorOffsets)
+                               bits |= TermVectorsReader.STORE_OFFSET_WITH_TERMVECTOR;
+                       tvf.WriteByte(bits);
+                       
+                       int encoderUpto = 0;
+                       int lastTermBytesCount = 0;
+                       
+                       ByteSliceReader reader = perThread.vectorSliceReader;
+                       char[][] charBuffers = perThread.termsHashPerThread.charPool.buffers;
+                       for (int j = 0; j < numPostings; j++)
+                       {
+                               TermVectorsTermsWriter.PostingList posting = (TermVectorsTermsWriter.PostingList) postings[j];
+                               int freq = posting.freq;
+                               
+                               char[] text2 = charBuffers[posting.textStart >> DocumentsWriter.CHAR_BLOCK_SHIFT];
+                               int start2 = posting.textStart & DocumentsWriter.CHAR_BLOCK_MASK;
+                               
+                               // We swap between two encoders to save copying
+                               // last Term's byte array
+                               UnicodeUtil.UTF8Result utf8Result = perThread.utf8Results[encoderUpto];
+                               
+                               // TODO: we could do this incrementally
+                               UnicodeUtil.UTF16toUTF8(text2, start2, utf8Result);
+                               int termBytesCount = utf8Result.length;
+                               
+                               // TODO: UTF16toUTF8 could tell us this prefix
+                               // Compute common prefix between last term and
+                               // this term
+                               int prefix = 0;
+                               if (j > 0)
+                               {
+                                       byte[] lastTermBytes = perThread.utf8Results[1 - encoderUpto].result;
+                                       byte[] termBytes = perThread.utf8Results[encoderUpto].result;
+                                       while (prefix < lastTermBytesCount && prefix < termBytesCount)
+                                       {
+                                               if (lastTermBytes[prefix] != termBytes[prefix])
+                                                       break;
+                                               prefix++;
+                                       }
+                               }
+                               encoderUpto = 1 - encoderUpto;
+                               lastTermBytesCount = termBytesCount;
+                               
+                               int suffix = termBytesCount - prefix;
+                               tvf.WriteVInt(prefix);
+                               tvf.WriteVInt(suffix);
+                               tvf.WriteBytes(utf8Result.result, prefix, suffix);
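+                               // Illustrative example (not from the original source): writing
+                               // "apple" then "apply" emits prefix=4, suffix=1 and the single
+                               // byte "y", so the shared "appl" bytes are stored only once.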
+                               tvf.WriteVInt(freq);
+                               
+                               if (doVectorPositions)
+                               {
+                                       termsHashPerField.InitReader(reader, posting, 0);
+                                       reader.WriteTo(tvf);
+                               }
+                               
+                               if (doVectorOffsets)
+                               {
+                                       termsHashPerField.InitReader(reader, posting, 1);
+                                       reader.WriteTo(tvf);
+                               }
+                       }
+                       
+                       termsHashPerField.Reset();
+
+            // NOTE: we clear, per-field, at the thread level,
+            // because term vectors fully write themselves on each
+            // field; this saves RAM (eg if large doc has two large
+            // fields w/ term vectors on) because we recycle/reuse
+            // all RAM after each field:
+                       perThread.termsHashPerThread.Reset(false);
+               }
+               
+               internal void  ShrinkHash()
+               {
+                       termsHashPerField.ShrinkHash(maxNumPostings);
+                       maxNumPostings = 0;
+               }
+               
+               internal override void  Start(Fieldable f)
+               {
+                       if (doVectorOffsets)
+                       {
+                               offsetAttribute = (OffsetAttribute) fieldState.attributeSource.AddAttribute(typeof(OffsetAttribute));
+                       }
+                       else
+                       {
+                               offsetAttribute = null;
+                       }
+               }
+               
+               internal override void  NewTerm(RawPostingList p0)
+               {
+                       
+                       System.Diagnostics.Debug.Assert(docState.TestPoint("TermVectorsTermsWriterPerField.newTerm start"));
+                       
+                       TermVectorsTermsWriter.PostingList p = (TermVectorsTermsWriter.PostingList) p0;
+                       
+                       p.freq = 1;
+                       
+                       if (doVectorOffsets)
+                       {
+                               int startOffset = fieldState.offset + offsetAttribute.StartOffset();
+                               int endOffset = fieldState.offset + offsetAttribute.EndOffset();
+                               
+                               termsHashPerField.WriteVInt(1, startOffset);
+                               termsHashPerField.WriteVInt(1, endOffset - startOffset);
+                               p.lastOffset = endOffset;
+                       }
+                       
+                       if (doVectorPositions)
+                       {
+                               termsHashPerField.WriteVInt(0, fieldState.position);
+                               p.lastPosition = fieldState.position;
+                       }
+               }
+               
+               internal override void  AddTerm(RawPostingList p0)
+               {
+                       
+                       System.Diagnostics.Debug.Assert(docState.TestPoint("TermVectorsTermsWriterPerField.addTerm start"));
+                       
+                       TermVectorsTermsWriter.PostingList p = (TermVectorsTermsWriter.PostingList) p0;
+                       p.freq++;
+                       
+                       if (doVectorOffsets)
+                       {
+                               int startOffset = fieldState.offset + offsetAttribute.StartOffset();
+                               int endOffset = fieldState.offset + offsetAttribute.EndOffset();
+                               
+                               termsHashPerField.WriteVInt(1, startOffset - p.lastOffset);
+                               termsHashPerField.WriteVInt(1, endOffset - startOffset);
+                               p.lastOffset = endOffset;
+                       }
+                       
+                       if (doVectorPositions)
+                       {
+                               termsHashPerField.WriteVInt(0, fieldState.position - p.lastPosition);
+                               p.lastPosition = fieldState.position;
+                       }
+               }
+               
+               internal override void  SkippingLongTerm()
+               {
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorsTermsWriterPerThread.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorsTermsWriterPerThread.cs
new file mode 100644 (file)
index 0000000..ab47c77
--- /dev/null
@@ -0,0 +1,106 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using UnicodeUtil = Mono.Lucene.Net.Util.UnicodeUtil;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class TermVectorsTermsWriterPerThread:TermsHashConsumerPerThread
+       {
+               
+               internal TermVectorsTermsWriter termsWriter;
+               internal TermsHashPerThread termsHashPerThread;
+               internal DocumentsWriter.DocState docState;
+               
+               internal TermVectorsTermsWriter.PerDoc doc;
+               
+               public TermVectorsTermsWriterPerThread(TermsHashPerThread termsHashPerThread, TermVectorsTermsWriter termsWriter)
+               {
+                       this.termsWriter = termsWriter;
+                       this.termsHashPerThread = termsHashPerThread;
+                       docState = termsHashPerThread.docState;
+               }
+               
+               // Used by perField when serializing the term vectors
+               internal ByteSliceReader vectorSliceReader = new ByteSliceReader();
+               
+               internal UnicodeUtil.UTF8Result[] utf8Results = new UnicodeUtil.UTF8Result[]{new UnicodeUtil.UTF8Result(), new UnicodeUtil.UTF8Result()};
+               
+               public override void  StartDocument()
+               {
+                       System.Diagnostics.Debug.Assert(ClearLastVectorFieldName());
+                       if (doc != null)
+                       {
+                               doc.Reset();
+                               doc.docID = docState.docID;
+                       }
+               }
+               
+               public override DocumentsWriter.DocWriter FinishDocument()
+               {
+                       try
+                       {
+                               return doc;
+                       }
+                       finally
+                       {
+                               doc = null;
+                       }
+               }
+               
+               public override TermsHashConsumerPerField AddField(TermsHashPerField termsHashPerField, FieldInfo fieldInfo)
+               {
+                       return new TermVectorsTermsWriterPerField(termsHashPerField, this, fieldInfo);
+               }
+               
+               public override void  Abort()
+               {
+                       if (doc != null)
+                       {
+                               doc.Abort();
+                               doc = null;
+                       }
+               }
+               
+               // Called only by assert
+               internal bool ClearLastVectorFieldName()
+               {
+                       lastVectorFieldName = null;
+                       return true;
+               }
+               
+               // Called only by assert
+               internal System.String lastVectorFieldName;
+               internal bool VectorFieldsInOrder(FieldInfo fi)
+               {
+                       try
+                       {
+                               if (lastVectorFieldName != null)
+                                       return String.CompareOrdinal(lastVectorFieldName, fi.name) < 0;
+                               else
+                                       return true;
+                       }
+                       finally
+                       {
+                               lastVectorFieldName = fi.name;
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorsWriter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermVectorsWriter.cs
new file mode 100644 (file)
index 0000000..718076b
--- /dev/null
@@ -0,0 +1,245 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Directory = Mono.Lucene.Net.Store.Directory;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+using StringHelper = Mono.Lucene.Net.Util.StringHelper;
+using UnicodeUtil = Mono.Lucene.Net.Util.UnicodeUtil;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class TermVectorsWriter
+       {
+               
+               private IndexOutput tvx = null, tvd = null, tvf = null;
+               private FieldInfos fieldInfos;
+               internal UnicodeUtil.UTF8Result[] utf8Results = new UnicodeUtil.UTF8Result[]{new UnicodeUtil.UTF8Result(), new UnicodeUtil.UTF8Result()};
+               
+               public TermVectorsWriter(Directory directory, System.String segment, FieldInfos fieldInfos)
+               {
+                       // Open files for TermVector storage
+                       tvx = directory.CreateOutput(segment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
+                       tvx.WriteInt(TermVectorsReader.FORMAT_CURRENT);
+                       tvd = directory.CreateOutput(segment + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
+                       tvd.WriteInt(TermVectorsReader.FORMAT_CURRENT);
+                       tvf = directory.CreateOutput(segment + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
+                       tvf.WriteInt(TermVectorsReader.FORMAT_CURRENT);
+                       
+                       this.fieldInfos = fieldInfos;
+               }
+               
+               /// <summary> Add a complete document specified by all its term vectors. If the document
+               /// has no term vectors, an entry is still written to tvx and a zero field count to tvd.
+               /// 
+               /// </summary>
+               /// <param name="vectors">
+               /// </param>
+               /// <throws>  IOException </throws>
+               public void  AddAllDocVectors(TermFreqVector[] vectors)
+               {
+                       
+                       tvx.WriteLong(tvd.GetFilePointer());
+                       tvx.WriteLong(tvf.GetFilePointer());
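+                       // tvx holds two longs per document: the file pointer into tvd and
+                       // the file pointer into tvf; readers seek here to find a doc's vectors.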
+                       
+                       if (vectors != null)
+                       {
+                               int numFields = vectors.Length;
+                               tvd.WriteVInt(numFields);
+                               
+                               long[] fieldPointers = new long[numFields];
+                               
+                               for (int i = 0; i < numFields; i++)
+                               {
+                                       fieldPointers[i] = tvf.GetFilePointer();
+                                       
+                                       int fieldNumber = fieldInfos.FieldNumber(vectors[i].GetField());
+                                       
+                                       // 1st pass: write field numbers to tvd
+                                       tvd.WriteVInt(fieldNumber);
+                                       
+                                       int numTerms = vectors[i].Size();
+                                       tvf.WriteVInt(numTerms);
+                                       
+                                       TermPositionVector tpVector;
+                                       
+                                       byte bits;
+                                       bool storePositions;
+                                       bool storeOffsets;
+                                       
+                                       if (vectors[i] is TermPositionVector)
+                                       {
+                                               // May have positions & offsets
+                                               tpVector = (TermPositionVector) vectors[i];
+                                               storePositions = tpVector.Size() > 0 && tpVector.GetTermPositions(0) != null;
+                                               storeOffsets = tpVector.Size() > 0 && tpVector.GetOffsets(0) != null;
+                                               bits = (byte) ((storePositions ? TermVectorsReader.STORE_POSITIONS_WITH_TERMVECTOR : (byte) 0) + (storeOffsets ? TermVectorsReader.STORE_OFFSET_WITH_TERMVECTOR : (byte) 0));
+                                       }
+                                       else
+                                       {
+                                               tpVector = null;
+                                               bits = 0;
+                                               storePositions = false;
+                                               storeOffsets = false;
+                                       }
+                                       
+                                       tvf.WriteVInt(bits);
+                                       
+                                       System.String[] terms = vectors[i].GetTerms();
+                                       int[] freqs = vectors[i].GetTermFrequencies();
+                                       
+                                       int utf8Upto = 0;
+                                       utf8Results[1].length = 0;
+                                       
+                                       for (int j = 0; j < numTerms; j++)
+                                       {
+                                               
+                                               UnicodeUtil.UTF16toUTF8(terms[j], 0, terms[j].Length, utf8Results[utf8Upto]);
+                                               
+                                               int start = StringHelper.BytesDifference(utf8Results[1 - utf8Upto].result, utf8Results[1 - utf8Upto].length, utf8Results[utf8Upto].result, utf8Results[utf8Upto].length);
+                                               int length = utf8Results[utf8Upto].length - start;
+                                               tvf.WriteVInt(start); // write shared prefix length
+                                               tvf.WriteVInt(length); // write delta length
+                                               tvf.WriteBytes(utf8Results[utf8Upto].result, start, length); // write delta bytes
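+                                               // For j == 0 the other buffer's length was reset to 0 above, so
+                                               // BytesDifference returns 0 and the full term bytes are written.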
+                                               utf8Upto = 1 - utf8Upto;
+                                               
+                                               int termFreq = freqs[j];
+                                               
+                                               tvf.WriteVInt(termFreq);
+                                               
+                                               if (storePositions)
+                                               {
+                                                       int[] positions = tpVector.GetTermPositions(j);
+                                                       if (positions == null)
+                                                               throw new System.SystemException("Trying to write positions that are null!");
+                                                       System.Diagnostics.Debug.Assert(positions.Length == termFreq);
+                                                       
+                                                       // use delta encoding for positions
+                                                       int lastPosition = 0;
+                                                       for (int k = 0; k < positions.Length; k++)
+                                                       {
+                                                               int position = positions[k];
+                                                               tvf.WriteVInt(position - lastPosition);
+                                                               lastPosition = position;
+                                                       }
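+                                                       // Illustrative: positions [3, 7, 7, 10] are written as the
+                                                       // deltas 3, 4, 0, 3 (a repeated position encodes as 0).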
+                                               }
+                                               
+                                               if (storeOffsets)
+                                               {
+                                                       TermVectorOffsetInfo[] offsets = tpVector.GetOffsets(j);
+                                                       if (offsets == null)
+                                                               throw new System.SystemException("Trying to write offsets that are null!");
+                                                       System.Diagnostics.Debug.Assert(offsets.Length == termFreq);
+                                                       
+                                                       // use delta encoding for offsets
+                                                       int lastEndOffset = 0;
+                                                       for (int k = 0; k < offsets.Length; k++)
+                                                       {
+                                                               int startOffset = offsets[k].GetStartOffset();
+                                                               int endOffset = offsets[k].GetEndOffset();
+                                                               tvf.WriteVInt(startOffset - lastEndOffset);
+                                                               tvf.WriteVInt(endOffset - startOffset);
+                                                               lastEndOffset = endOffset;
+                                                       }
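+                                                       // Illustrative: offsets (2,5) and (9,12) are written as
+                                                       // 2,3 then 4,3: each start is a delta from the previous
+                                                       // end offset, followed by the token length.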
+                                               }
+                                       }
+                               }
+                               
+                               // 2nd pass: write field pointers to tvd
+                               if (numFields > 1)
+                               {
+                                       long lastFieldPointer = fieldPointers[0];
+                                       for (int i = 1; i < numFields; i++)
+                                       {
+                                               long fieldPointer = fieldPointers[i];
+                                               tvd.WriteVLong(fieldPointer - lastFieldPointer);
+                                               lastFieldPointer = fieldPointer;
+                                       }
+                               }
+                       }
+                       else
+                               tvd.WriteVInt(0);
+               }
+               
+               /// <summary> Do a bulk copy of numDocs documents from reader to our
+               /// streams. This is used to expedite merging when the
+               /// field numbers are congruent.
+               /// </summary>
+               internal void  AddRawDocuments(TermVectorsReader reader, int[] tvdLengths, int[] tvfLengths, int numDocs)
+               {
+                       long tvdPosition = tvd.GetFilePointer();
+                       long tvfPosition = tvf.GetFilePointer();
+                       long tvdStart = tvdPosition;
+                       long tvfStart = tvfPosition;
+                       for (int i = 0; i < numDocs; i++)
+                       {
+                               tvx.WriteLong(tvdPosition);
+                               tvdPosition += tvdLengths[i];
+                               tvx.WriteLong(tvfPosition);
+                               tvfPosition += tvfLengths[i];
+                       }
+                       tvd.CopyBytes(reader.GetTvdStream(), tvdPosition - tvdStart);
+                       tvf.CopyBytes(reader.GetTvfStream(), tvfPosition - tvfStart);
+                       System.Diagnostics.Debug.Assert(tvd.GetFilePointer() == tvdPosition);
+                       System.Diagnostics.Debug.Assert(tvf.GetFilePointer() == tvfPosition);
+               }
+               
+               /// <summary>Close all streams. </summary>
+               internal void  Close()
+               {
+                       // make an effort to close all streams we can but remember and re-throw
+                       // the first exception encountered in this process
+                       System.IO.IOException keep = null;
+                       if (tvx != null)
+                               try
+                               {
+                                       tvx.Close();
+                               }
+                               catch (System.IO.IOException e)
+                               {
+                                       if (keep == null)
+                                               keep = e;
+                               }
+                       if (tvd != null)
+                               try
+                               {
+                                       tvd.Close();
+                               }
+                               catch (System.IO.IOException e)
+                               {
+                                       if (keep == null)
+                                               keep = e;
+                               }
+                       if (tvf != null)
+                               try
+                               {
+                                       tvf.Close();
+                               }
+                               catch (System.IO.IOException e)
+                               {
+                                       if (keep == null)
+                                               keep = e;
+                               }
+                       if (keep != null)
+                       {
+                               throw keep;
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHash.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHash.cs
new file mode 100644 (file)
index 0000000..298e1d9
--- /dev/null
@@ -0,0 +1,284 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using ArrayUtil = Mono.Lucene.Net.Util.ArrayUtil;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       /// <summary>This class implements {@link InvertedDocConsumer}, which
+       /// is passed each token produced by the analyzer on each
+       /// field.  It stores these tokens in a hash table, and
+       /// allocates separate byte streams per token.  Consumers of
+       /// this class, eg {@link FreqProxTermsWriter} and {@link
+       /// TermVectorsTermsWriter}, write their own byte streams
+       /// under each term.
+       /// </summary>
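+       /// <remarks>In this port's default indexing chain the primary TermsHash
+       /// feeds FreqProxTermsWriter while its nextTermsHash feeds
+       /// TermVectorsTermsWriter, so both consumers share one pass over the
+       /// token stream.</remarks>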
+       
+       sealed class TermsHash:InvertedDocConsumer
+       {
+               
+               internal TermsHashConsumer consumer;
+               internal TermsHash nextTermsHash;
+               internal int bytesPerPosting;
+               internal int postingsFreeChunk;
+               internal DocumentsWriter docWriter;
+                                               
+               private RawPostingList[] postingsFreeList = new RawPostingList[1];
+               private int postingsFreeCount;
+               private int postingsAllocCount;
+               internal bool trackAllocations;
+               
+               public TermsHash(DocumentsWriter docWriter, bool trackAllocations, TermsHashConsumer consumer, TermsHash nextTermsHash)
+               {
+                       this.docWriter = docWriter;
+                       this.consumer = consumer;
+                       this.nextTermsHash = nextTermsHash;
+                       this.trackAllocations = trackAllocations;
+                       
+                       // Why + 4*POINTER_NUM_BYTE below?
+                       //   +1: Posting is referenced by postingsFreeList array
+                       //   +3: Posting is referenced by hash, which
+                       //       targets 25-50% fill factor; approximate this
+                       //       as 3X # pointers
+                       bytesPerPosting = consumer.BytesPerPosting() + 4 * DocumentsWriter.POINTER_NUM_BYTE;
+                       postingsFreeChunk = (int) (DocumentsWriter.BYTE_BLOCK_SIZE / bytesPerPosting);
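+                       // Illustrative arithmetic (assuming 4-byte pointers and 32 KB byte
+                       // blocks, as in this port's DocumentsWriter): a consumer posting of
+                       // 24 bytes is charged 24 + 4*4 = 40 bytes, so postingsFreeChunk is
+                       // 32768 / 40 = 819 postings per block.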
+               }
+               
+               internal override InvertedDocConsumerPerThread AddThread(DocInverterPerThread docInverterPerThread)
+               {
+                       return new TermsHashPerThread(docInverterPerThread, this, nextTermsHash, null);
+               }
+               
+               internal TermsHashPerThread AddThread(DocInverterPerThread docInverterPerThread, TermsHashPerThread primaryPerThread)
+               {
+                       return new TermsHashPerThread(docInverterPerThread, this, nextTermsHash, primaryPerThread);
+               }
+               
+               internal override void  SetFieldInfos(FieldInfos fieldInfos)
+               {
+                       this.fieldInfos = fieldInfos;
+                       consumer.SetFieldInfos(fieldInfos);
+               }
+
+        // NOTE: do not make this sync'd; it's not necessary (DW
+        // ensures all other threads are idle), and it leads to
+        // deadlock
+               public override void  Abort()
+               {
+                       consumer.Abort();
+                       if (nextTermsHash != null)
+                               nextTermsHash.Abort();
+               }
+               
+               internal void  ShrinkFreePostings(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
+               {
+                       
+                       System.Diagnostics.Debug.Assert(postingsFreeCount == postingsAllocCount, System.Threading.Thread.CurrentThread.Name + ": postingsFreeCount=" + postingsFreeCount + " postingsAllocCount=" + postingsAllocCount + " consumer=" + consumer);
+
+            int newSize = 1;
+                       if (newSize != postingsFreeList.Length)
+                       {
+                if (postingsFreeCount > newSize)
+                {
+                    if (trackAllocations)
+                    {
+                        docWriter.BytesAllocated(-(postingsFreeCount - newSize) * bytesPerPosting);
+                    }
+                    postingsFreeCount = newSize;
+                    postingsAllocCount = newSize;
+                }
+
+                               RawPostingList[] newArray = new RawPostingList[newSize];
+                               Array.Copy(postingsFreeList, 0, newArray, 0, postingsFreeCount);
+                               postingsFreeList = newArray;
+                       }
+               }
+               
+               internal override void  CloseDocStore(SegmentWriteState state)
+               {
+                       lock (this)
+                       {
+                               consumer.CloseDocStore(state);
+                               if (nextTermsHash != null)
+                                       nextTermsHash.CloseDocStore(state);
+                       }
+               }
+               
+               internal override void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state)
+               {
+                       lock (this)
+                       {
+                               System.Collections.IDictionary childThreadsAndFields = new System.Collections.Hashtable();
+                               System.Collections.IDictionary nextThreadsAndFields;
+                               
+                               if (nextTermsHash != null)
+                               {
+                                       nextThreadsAndFields = new System.Collections.Hashtable();
+                               }
+                               else
+                                       nextThreadsAndFields = null;
+
+                System.Collections.IEnumerator it = new System.Collections.Hashtable(threadsAndFields).GetEnumerator();
+                               while (it.MoveNext())
+                               {
+                                       
+                                       System.Collections.DictionaryEntry entry = (System.Collections.DictionaryEntry) it.Current;
+                                       
+                                       TermsHashPerThread perThread = (TermsHashPerThread) entry.Key;
+                                       
+                                       System.Collections.ICollection fields = (System.Collections.ICollection) entry.Value;
+                                       
+                                       System.Collections.IEnumerator fieldsIt = fields.GetEnumerator();
+                    System.Collections.Hashtable childFields = new System.Collections.Hashtable();
+                                       System.Collections.Hashtable nextChildFields;
+                                       
+                                       if (nextTermsHash != null)
+                                       {
+                        nextChildFields = new System.Collections.Hashtable();
+                                       }
+                                       else
+                                               nextChildFields = null;
+                                       
+                                       while (fieldsIt.MoveNext())
+                                       {
+                                               TermsHashPerField perField = (TermsHashPerField) ((System.Collections.DictionaryEntry) fieldsIt.Current).Key;
+                                               childFields[perField.consumer] = perField.consumer;
+                                               if (nextTermsHash != null)
+                                                       nextChildFields[perField.nextPerField] = perField.nextPerField;
+                                       }
+                                       
+                                       childThreadsAndFields[perThread.consumer] = childFields;
+                                       if (nextTermsHash != null)
+                                               nextThreadsAndFields[perThread.nextPerThread] = nextChildFields;
+                               }
+                               
+                               consumer.Flush(childThreadsAndFields, state);
+                               
+                               ShrinkFreePostings(threadsAndFields, state);
+                               
+                               if (nextTermsHash != null)
+                                       nextTermsHash.Flush(nextThreadsAndFields, state);
+                       }
+               }
+               
+               public override bool FreeRAM()
+               {
+                       if (!trackAllocations)
+                               return false;
+                               
+                       bool any;
+                       long bytesFreed = 0;
+            lock (this)
+            {
+                int numToFree;
+                if (postingsFreeCount >= postingsFreeChunk)
+                    numToFree = postingsFreeChunk;
+                else
+                    numToFree = postingsFreeCount;
+                any = numToFree > 0;
+                if (any)
+                {
+                    for (int i = postingsFreeCount - numToFree; i < postingsFreeCount; i++)
+                    {
+                        postingsFreeList[i] = null;
+                    }
+                    //Arrays.fill(postingsFreeList, postingsFreeCount - numToFree, postingsFreeCount, null);
+                    postingsFreeCount -= numToFree;
+                    postingsAllocCount -= numToFree;
+                    bytesFreed = -numToFree * bytesPerPosting;
+                    any = true;
+                }
+            }
+
+                       if (any)
+                       {
+                docWriter.BytesAllocated(bytesFreed);
+                       }
+                               
+                       if (nextTermsHash != null)
+                               any |= nextTermsHash.FreeRAM();
+                               
+                       return any;
+               }
+               
+               public void  RecyclePostings(RawPostingList[] postings, int numPostings)
+               {
+                       lock (this)
+                       {
+                               
+                               System.Diagnostics.Debug.Assert(postings.Length >= numPostings);
+                               
+                               // Move all Postings from this ThreadState back to our
+                               // free list.  We pre-allocated this array while we were
+                               // creating Postings to make sure it's large enough
+                               System.Diagnostics.Debug.Assert(postingsFreeCount + numPostings <= postingsFreeList.Length);
+                               Array.Copy(postings, 0, postingsFreeList, postingsFreeCount, numPostings);
+                               postingsFreeCount += numPostings;
+                       }
+               }
+               
+               public void  GetPostings(RawPostingList[] postings)
+               {
+                       lock (this)
+                       {
+                               
+                               System.Diagnostics.Debug.Assert(docWriter.writer.TestPoint("TermsHash.getPostings start"));
+                               
+                               System.Diagnostics.Debug.Assert(postingsFreeCount <= postingsFreeList.Length);
+                               System.Diagnostics.Debug.Assert(postingsFreeCount <= postingsAllocCount, "postingsFreeCount=" + postingsFreeCount + " postingsAllocCount=" + postingsAllocCount);
+                               
+                               int numToCopy;
+                               if (postingsFreeCount < postings.Length)
+                                       numToCopy = postingsFreeCount;
+                               else
+                                       numToCopy = postings.Length;
+                               int start = postingsFreeCount - numToCopy;
+                               System.Diagnostics.Debug.Assert(start >= 0);
+                               System.Diagnostics.Debug.Assert(start + numToCopy <= postingsFreeList.Length);
+                               System.Diagnostics.Debug.Assert(numToCopy <= postings.Length);
+                               Array.Copy(postingsFreeList, start, postings, 0, numToCopy);
+                               
+                               // Directly allocate the remainder if any
+                               if (numToCopy != postings.Length)
+                               {
+                                       int extra = postings.Length - numToCopy;
+                                       int newPostingsAllocCount = postingsAllocCount + extra;
+                                       
+                                       consumer.CreatePostings(postings, numToCopy, extra);
+                                       System.Diagnostics.Debug.Assert(docWriter.writer.TestPoint("TermsHash.getPostings after create"));
+                                       postingsAllocCount += extra;
+                                       
+                                       if (trackAllocations)
+                                               docWriter.BytesAllocated(extra * bytesPerPosting);
+                                       
+                                       if (newPostingsAllocCount > postingsFreeList.Length)
+                                       // Pre-allocate the postingsFreeList so it's large
+                                       // enough to hold all postings we've given out
+                                               postingsFreeList = new RawPostingList[ArrayUtil.GetNextSize(newPostingsAllocCount)];
+                               }
+                               
+                               postingsFreeCount -= numToCopy;
+                               
+                               if (trackAllocations)
+                                       docWriter.BytesUsed(postings.Length * bytesPerPosting);
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHashConsumer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHashConsumer.cs
new file mode 100644 (file)
index 0000000..a275fbb
--- /dev/null
@@ -0,0 +1,39 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       abstract class TermsHashConsumer
+       {
+               internal abstract int BytesPerPosting();
+               internal abstract void  CreatePostings(RawPostingList[] postings, int start, int count);
+               public abstract TermsHashConsumerPerThread AddThread(TermsHashPerThread perThread);
+               public abstract void  Flush(System.Collections.IDictionary threadsAndFields, SegmentWriteState state);
+               public abstract void  Abort();
+               internal abstract void  CloseDocStore(SegmentWriteState state);
+               
+               internal FieldInfos fieldInfos;
+               
+               internal virtual void  SetFieldInfos(FieldInfos fieldInfos)
+               {
+                       this.fieldInfos = fieldInfos;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHashConsumerPerField.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHashConsumerPerField.cs
new file mode 100644 (file)
index 0000000..04bf75a
--- /dev/null
@@ -0,0 +1,42 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/// <summary>Implement this class to plug into the TermsHash
+/// processor, which inverts &amp; stores Tokens into a hash
+/// table and provides an API for writing bytes into
+/// multiple streams for each unique Token. 
+/// </summary>
+
+using System;
+
+using Fieldable = Mono.Lucene.Net.Documents.Fieldable;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       abstract class TermsHashConsumerPerField
+       {
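+               // Start(fields, count) returns true when this consumer wants the field
+               // processed at all (e.g. term vectors are enabled for it); NewTerm and
+               // AddTerm are then invoked for each token of the field.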
+               internal abstract bool Start(Fieldable[] fields, int count);
+               internal abstract void  Finish();
+               internal abstract void  SkippingLongTerm();
+               internal abstract void  Start(Fieldable field);
+               internal abstract void  NewTerm(RawPostingList p);
+               internal abstract void  AddTerm(RawPostingList p);
+               internal abstract int GetStreamCount();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHashConsumerPerThread.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHashConsumerPerThread.cs
new file mode 100644 (file)
index 0000000..8e8d3da
--- /dev/null
@@ -0,0 +1,30 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       abstract class TermsHashConsumerPerThread
+       {
+               public abstract void  StartDocument();
+               public abstract DocumentsWriter.DocWriter FinishDocument();
+               abstract public TermsHashConsumerPerField AddField(TermsHashPerField termsHashPerField, FieldInfo fieldInfo);
+               abstract public void  Abort();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHashPerField.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHashPerField.cs
new file mode 100644 (file)
index 0000000..2de2c55
--- /dev/null
@@ -0,0 +1,639 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+using Fieldable = Mono.Lucene.Net.Documents.Fieldable;
+using UnicodeUtil = Mono.Lucene.Net.Util.UnicodeUtil;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class TermsHashPerField:InvertedDocConsumerPerField
+       {
+               private void  InitBlock()
+               {
+                       postingsHashHalfSize = postingsHashSize / 2;
+                       postingsHashMask = postingsHashSize - 1;
+                       postingsHash = new RawPostingList[postingsHashSize];
+               }
+               
+               internal TermsHashConsumerPerField consumer;
+               internal TermsHashPerField nextPerField;
+               internal TermsHashPerThread perThread;
+               internal DocumentsWriter.DocState docState;
+               internal FieldInvertState fieldState;
+               internal TermAttribute termAtt;
+               
+               // Copied from our perThread
+               internal CharBlockPool charPool;
+               internal IntBlockPool intPool;
+               internal ByteBlockPool bytePool;
+               
+               internal int streamCount;
+               internal int numPostingInt;
+               
+               internal FieldInfo fieldInfo;
+               
+               internal bool postingsCompacted;
+               internal int numPostings;
+               private int postingsHashSize = 4;
+               private int postingsHashHalfSize;
+               private int postingsHashMask;
+               private RawPostingList[] postingsHash;
+               private RawPostingList p;
+               
+               public TermsHashPerField(DocInverterPerField docInverterPerField, TermsHashPerThread perThread, TermsHashPerThread nextPerThread, FieldInfo fieldInfo)
+               {
+                       InitBlock();
+                       this.perThread = perThread;
+                       intPool = perThread.intPool;
+                       charPool = perThread.charPool;
+                       bytePool = perThread.bytePool;
+                       docState = perThread.docState;
+                       fieldState = docInverterPerField.fieldState;
+                       this.consumer = perThread.consumer.AddField(this, fieldInfo);
+                       streamCount = consumer.GetStreamCount();
+                       numPostingInt = 2 * streamCount;
+                       this.fieldInfo = fieldInfo;
+                       if (nextPerThread != null)
+                               nextPerField = (TermsHashPerField) nextPerThread.AddField(docInverterPerField, fieldInfo);
+                       else
+                               nextPerField = null;
+               }
+               
+               internal void  ShrinkHash(int targetSize)
+               {
+                       System.Diagnostics.Debug.Assert(postingsCompacted || numPostings == 0);
+
+            int newSize = 4;
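+                       // Note: this port always shrinks the hash back to the minimum size
+                       // of 4; the targetSize argument is currently unused.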
+                       
+                       if (newSize != postingsHash.Length)
+                       {
+                               postingsHash = new RawPostingList[newSize];
+                               postingsHashSize = newSize;
+                               postingsHashHalfSize = newSize / 2;
+                               postingsHashMask = newSize - 1;
+                       }
+            System.Array.Clear(postingsHash,0,postingsHash.Length);
+               }
+               
+               public void  Reset()
+               {
+                       if (!postingsCompacted)
+                               CompactPostings();
+                       System.Diagnostics.Debug.Assert(numPostings <= postingsHash.Length);
+                       if (numPostings > 0)
+                       {
+                               perThread.termsHash.RecyclePostings(postingsHash, numPostings);
+                Array.Clear(postingsHash, 0, numPostings);
+                               numPostings = 0;
+                       }
+                       postingsCompacted = false;
+                       if (nextPerField != null)
+                               nextPerField.Reset();
+               }
+               
+               public override void  Abort()
+               {
+                       lock (this)
+                       {
+                               Reset();
+                               if (nextPerField != null)
+                                       nextPerField.Abort();
+                       }
+               }
+               
+               public void  InitReader(ByteSliceReader reader, RawPostingList p, int stream)
+               {
+                       System.Diagnostics.Debug.Assert(stream < streamCount);
+                       int[] ints = intPool.buffers[p.intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
+                       int upto = p.intStart & DocumentsWriter.INT_BLOCK_MASK;
+                       reader.Init(bytePool, p.byteStart + stream * ByteBlockPool.FIRST_LEVEL_SIZE, ints[upto + stream]);
+               }
+               
+               private void  CompactPostings()
+               {
+                       lock (this)
+                       {
+                               int upto = 0;
+                               for (int i = 0; i < postingsHashSize; i++)
+                               {
+                                       if (postingsHash[i] != null)
+                                       {
+                                               if (upto < i)
+                                               {
+                                                       postingsHash[upto] = postingsHash[i];
+                                                       postingsHash[i] = null;
+                                               }
+                                               upto++;
+                                       }
+                               }
+                               
+                               System.Diagnostics.Debug.Assert(upto == numPostings);
+                               postingsCompacted = true;
+                       }
+               }
+               
+               /// <summary>Collapse the hash table &amp; sort in-place. </summary>
+               public RawPostingList[] SortPostings()
+               {
+                       CompactPostings();
+                       QuickSort(postingsHash, 0, numPostings - 1);
+                       return postingsHash;
+               }
+               
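+               // Recursive quicksort over the compacted postings, using median-of-three
+               // pivot selection (lo, mid, hi) to avoid worst-case behavior on
+               // already-sorted input.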
+               internal void  QuickSort(RawPostingList[] postings, int lo, int hi)
+               {
+                       if (lo >= hi)
+                               return ;
+                       else if (hi == 1 + lo)
+                       {
+                               if (ComparePostings(postings[lo], postings[hi]) > 0)
+                               {
+                                       RawPostingList tmp = postings[lo];
+                                       postings[lo] = postings[hi];
+                                       postings[hi] = tmp;
+                               }
+                               return ;
+                       }
+                       
+                       int mid = SupportClass.Number.URShift((lo + hi), 1);
+                       
+                       if (ComparePostings(postings[lo], postings[mid]) > 0)
+                       {
+                               RawPostingList tmp = postings[lo];
+                               postings[lo] = postings[mid];
+                               postings[mid] = tmp;
+                       }
+                       
+                       if (ComparePostings(postings[mid], postings[hi]) > 0)
+                       {
+                               RawPostingList tmp = postings[mid];
+                               postings[mid] = postings[hi];
+                               postings[hi] = tmp;
+                               
+                               if (ComparePostings(postings[lo], postings[mid]) > 0)
+                               {
+                                       RawPostingList tmp2 = postings[lo];
+                                       postings[lo] = postings[mid];
+                                       postings[mid] = tmp2;
+                               }
+                       }
+                       
+                       int left = lo + 1;
+                       int right = hi - 1;
+                       
+                       if (left >= right)
+                               return ;
+                       
+                       RawPostingList partition = postings[mid];
+                       
+                       for (; ; )
+                       {
+                               while (ComparePostings(postings[right], partition) > 0)
+                                       --right;
+                               
+                               while (left < right && ComparePostings(postings[left], partition) <= 0)
+                                       ++left;
+                               
+                               if (left < right)
+                               {
+                                       RawPostingList tmp = postings[left];
+                                       postings[left] = postings[right];
+                                       postings[right] = tmp;
+                                       --right;
+                               }
+                               else
+                               {
+                                       break;
+                               }
+                       }
+                       
+                       QuickSort(postings, lo, left);
+                       QuickSort(postings, left + 1, hi);
+               }
+               
+               /// <summary>Compares the term text of two RawPostingList instances and
+               /// returns -1 if p1 &lt; p2, 1 if p1 &gt; p2, else 0.
+               /// </summary>
+               internal int ComparePostings(RawPostingList p1, RawPostingList p2)
+               {
+                       
+                       if (p1 == p2)
+                               return 0;
+                       
+                       char[] text1 = charPool.buffers[p1.textStart >> DocumentsWriter.CHAR_BLOCK_SHIFT];
+                       int pos1 = p1.textStart & DocumentsWriter.CHAR_BLOCK_MASK;
+                       char[] text2 = charPool.buffers[p2.textStart >> DocumentsWriter.CHAR_BLOCK_SHIFT];
+                       int pos2 = p2.textStart & DocumentsWriter.CHAR_BLOCK_MASK;
+                       
+                       System.Diagnostics.Debug.Assert(text1 != text2 || pos1 != pos2);
+                       
+                       while (true)
+                       {
+                               char c1 = text1[pos1++];
+                               char c2 = text2[pos2++];
+                               if (c1 != c2)
+                               {
+                                       if (0xffff == c2)
+                                               return 1;
+                                       else if (0xffff == c1)
+                                               return - 1;
+                                       else
+                                               return c1 - c2;
+                               }
+                               else
+                                       // This method should never compare equal postings
+                                       // unless p1==p2
+                                       System.Diagnostics.Debug.Assert(c1 != 0xffff);
+                       }
+               }
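+               // Terms are stored 0xffff-terminated in the char pool, and 0xffff can
+               // never occur inside a term (the primary Add() below replaces it), so
+               // the sentinel doubles as an end-of-string marker that sorts prefixes
+               // first.  For instance, comparing "ab" with "abc" reaches c1 == 0xffff
+               // and c2 == 'c' at position 2 and returns -1, i.e. "ab" < "abc".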
+               
+               /// <summary>Tests whether the text of the current RawPostingList p
+               /// equals the current tokenText.
+               /// </summary>
+               private bool PostingEquals(char[] tokenText, int tokenTextLen)
+               {
+                       
+                       char[] text = perThread.charPool.buffers[p.textStart >> DocumentsWriter.CHAR_BLOCK_SHIFT];
+                       System.Diagnostics.Debug.Assert(text != null);
+                       int pos = p.textStart & DocumentsWriter.CHAR_BLOCK_MASK;
+                       
+                       int tokenPos = 0;
+                       for (; tokenPos < tokenTextLen; pos++, tokenPos++)
+                               if (tokenText[tokenPos] != text[pos])
+                                       return false;
+                       return 0xffff == text[pos];
+               }
+               
+               private bool doCall;
+               private bool doNextCall;
+               
+               internal override void  Start(Fieldable f)
+               {
+                       termAtt = (TermAttribute) fieldState.attributeSource.AddAttribute(typeof(TermAttribute));
+                       consumer.Start(f);
+                       if (nextPerField != null)
+                       {
+                               nextPerField.Start(f);
+                       }
+               }
+               
+               internal override bool Start(Fieldable[] fields, int count)
+               {
+                       doCall = consumer.Start(fields, count);
+                       if (nextPerField != null)
+                               doNextCall = nextPerField.Start(fields, count);
+                       return doCall || doNextCall;
+               }
+               
+               // Secondary entry point (for the 2nd and subsequent TermsHash in the
+               // chain): the token text has already been "interned" into textStart,
+               // so we hash by textStart instead of by the text itself.
+               public void  Add(int textStart)
+               {
+                       
+                       int code = textStart;
+                       
+                       int hashPos = code & postingsHashMask;
+                       
+                       System.Diagnostics.Debug.Assert(!postingsCompacted);
+                       
+                       // Locate RawPostingList in hash
+                       p = postingsHash[hashPos];
+                       
+                       if (p != null && p.textStart != textStart)
+                       {
+                               // Conflict: keep searching different locations in
+                               // the hash table.
+                               int inc = ((code >> 8) + code) | 1;
+                               do 
+                               {
+                                       code += inc;
+                                       hashPos = code & postingsHashMask;
+                                       p = postingsHash[hashPos];
+                               }
+                               while (p != null && p.textStart != textStart);
+                       }
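+                       // The probe increment is forced odd by "| 1"; the table size
+                       // is a power of two, so an odd step is coprime to it and the
+                       // probe sequence eventually visits every slot.  E.g. with 8
+                       // slots and inc == 3 starting at 0: 0, 3, 6, 1, 4, 7, 2, 5.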
+                       
+                       if (p == null)
+                       {
+                               
+                               // First time we are seeing this token since we last
+                               // flushed the hash.
+                               
+                               // Refill?
+                               if (0 == perThread.freePostingsCount)
+                                       perThread.MorePostings();
+                               
+                               // Pull next free RawPostingList from free list
+                               p = perThread.freePostings[--perThread.freePostingsCount];
+                               System.Diagnostics.Debug.Assert(p != null);
+                               
+                               p.textStart = textStart;
+                               
+                               System.Diagnostics.Debug.Assert(postingsHash [hashPos] == null);
+                               postingsHash[hashPos] = p;
+                               numPostings++;
+                               
+                               if (numPostings == postingsHashHalfSize)
+                                       RehashPostings(2 * postingsHashSize);
+                               
+                               // Init stream slices
+                               if (numPostingInt + intPool.intUpto > DocumentsWriter.INT_BLOCK_SIZE)
+                                       intPool.NextBuffer();
+                               
+                               if (DocumentsWriter.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt * ByteBlockPool.FIRST_LEVEL_SIZE)
+                                       bytePool.NextBuffer();
+                               
+                               intUptos = intPool.buffer;
+                               intUptoStart = intPool.intUpto;
+                               intPool.intUpto += streamCount;
+                               
+                               p.intStart = intUptoStart + intPool.intOffset;
+                               
+                               for (int i = 0; i < streamCount; i++)
+                               {
+                                       int upto = bytePool.NewSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
+                                       intUptos[intUptoStart + i] = upto + bytePool.byteOffset;
+                               }
+                               p.byteStart = intUptos[intUptoStart];
+                               
+                               consumer.NewTerm(p);
+                       }
+                       else
+                       {
+                               intUptos = intPool.buffers[p.intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
+                               intUptoStart = p.intStart & DocumentsWriter.INT_BLOCK_MASK;
+                               consumer.AddTerm(p);
+                       }
+               }
+               
+               // Primary entry point (for first TermsHash)
+               internal override void  Add()
+               {
+                       
+                       System.Diagnostics.Debug.Assert(!postingsCompacted);
+                       
+                       // We are first in the chain, so we must "intern" the
+                       // term text into a textStart address
+                       
+                       // Get the text of this term.
+                       char[] tokenText = termAtt.TermBuffer();
+                       int tokenTextLen = termAtt.TermLength();
+                       
+                       // Compute hashcode & replace any invalid UTF16 sequences
+                       int downto = tokenTextLen;
+                       int code = 0;
+                       while (downto > 0)
+                       {
+                               char ch = tokenText[--downto];
+                               
+                               if (ch >= UnicodeUtil.UNI_SUR_LOW_START && ch <= UnicodeUtil.UNI_SUR_LOW_END)
+                               {
+                                       if (0 == downto)
+                                       {
+                                               // Unpaired
+                                               ch = tokenText[downto] = (char) (UnicodeUtil.UNI_REPLACEMENT_CHAR);
+                                       }
+                                       else
+                                       {
+                                               char ch2 = tokenText[downto - 1];
+                                               if (ch2 >= UnicodeUtil.UNI_SUR_HIGH_START && ch2 <= UnicodeUtil.UNI_SUR_HIGH_END)
+                                               {
+                                                       // OK: high followed by low.  This is a valid
+                                                       // surrogate pair.
+                                                       code = ((code * 31) + ch) * 31 + ch2;
+                                                       downto--;
+                                                       continue;
+                                               }
+                                               else
+                                               {
+                                                       // Unpaired
+                                                       ch = tokenText[downto] = (char) (UnicodeUtil.UNI_REPLACEMENT_CHAR);
+                                               }
+                                       }
+                               }
+                               else if (ch >= UnicodeUtil.UNI_SUR_HIGH_START && (ch <= UnicodeUtil.UNI_SUR_HIGH_END || ch == 0xffff))
+                               {
+                                       // Unpaired or 0xffff
+                                       ch = tokenText[downto] = (char) (UnicodeUtil.UNI_REPLACEMENT_CHAR);
+                               }
+                               
+                               code = (code * 31) + ch;
+                       }
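+                       // The scan runs back to front so a low surrogate can inspect
+                       // the char before it: e.g. the valid pair "\uD801\uDC37" has
+                       // both chars hashed, while a lone "\uDC37", an unpaired high
+                       // surrogate, or a literal 0xffff is replaced by U+FFFD.  This
+                       // keeps 0xffff reserved as the term terminator written below.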
+                       
+                       int hashPos = code & postingsHashMask;
+                       
+                       // Locate RawPostingList in hash
+                       p = postingsHash[hashPos];
+                       
+                       if (p != null && !PostingEquals(tokenText, tokenTextLen))
+                       {
+                               // Conflict: keep searching different locations in
+                               // the hash table.
+                               int inc = ((code >> 8) + code) | 1;
+                               do 
+                               {
+                                       code += inc;
+                                       hashPos = code & postingsHashMask;
+                                       p = postingsHash[hashPos];
+                               }
+                               while (p != null && !PostingEquals(tokenText, tokenTextLen));
+                       }
+                       
+                       if (p == null)
+                       {
+                               
+                               // First time we are seeing this token since we last
+                               // flushed the hash.
+                               int textLen1 = 1 + tokenTextLen;
+                               if (textLen1 + charPool.charUpto > DocumentsWriter.CHAR_BLOCK_SIZE)
+                               {
+                                       if (textLen1 > DocumentsWriter.CHAR_BLOCK_SIZE)
+                                       {
+                                               // Just skip this term, to remain as robust as
+                                               // possible during indexing.  A TokenFilter
+                                               // can be inserted into the analyzer chain if
+                                               // other behavior is wanted (pruning the term
+                                               // to a prefix, throwing an exception, etc).
+                                               
+                                               if (docState.maxTermPrefix == null)
+                                                       docState.maxTermPrefix = new System.String(tokenText, 0, 30);
+                                               
+                                               consumer.SkippingLongTerm();
+                                               return ;
+                                       }
+                                       charPool.NextBuffer();
+                               }
+                               
+                               // Refill?
+                               if (0 == perThread.freePostingsCount)
+                                       perThread.MorePostings();
+                               
+                               // Pull next free RawPostingList from free list
+                               p = perThread.freePostings[--perThread.freePostingsCount];
+                               System.Diagnostics.Debug.Assert(p != null);
+                               
+                               char[] text = charPool.buffer;
+                               int textUpto = charPool.charUpto;
+                               p.textStart = textUpto + charPool.charOffset;
+                               charPool.charUpto += textLen1;
+                               Array.Copy(tokenText, 0, text, textUpto, tokenTextLen);
+                               text[textUpto + tokenTextLen] = (char) (0xffff);
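+                               // The 0xffff just written terminates the term in the
+                               // char pool; the surrogate scan above guarantees the
+                               // copied text cannot contain it, so PostingEquals and
+                               // ComparePostings can rely on it as a sentinel.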
+                               
+                               System.Diagnostics.Debug.Assert(postingsHash [hashPos] == null);
+                               postingsHash[hashPos] = p;
+                               numPostings++;
+                               
+                               if (numPostings == postingsHashHalfSize)
+                                       RehashPostings(2 * postingsHashSize);
+                               
+                               // Init stream slices
+                               if (numPostingInt + intPool.intUpto > DocumentsWriter.INT_BLOCK_SIZE)
+                                       intPool.NextBuffer();
+                               
+                               if (DocumentsWriter.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt * ByteBlockPool.FIRST_LEVEL_SIZE)
+                                       bytePool.NextBuffer();
+                               
+                               intUptos = intPool.buffer;
+                               intUptoStart = intPool.intUpto;
+                               intPool.intUpto += streamCount;
+                               
+                               p.intStart = intUptoStart + intPool.intOffset;
+                               
+                               for (int i = 0; i < streamCount; i++)
+                               {
+                                       int upto = bytePool.NewSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
+                                       intUptos[intUptoStart + i] = upto + bytePool.byteOffset;
+                               }
+                               p.byteStart = intUptos[intUptoStart];
+                               
+                               consumer.NewTerm(p);
+                       }
+                       else
+                       {
+                               intUptos = intPool.buffers[p.intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
+                               intUptoStart = p.intStart & DocumentsWriter.INT_BLOCK_MASK;
+                               consumer.AddTerm(p);
+                       }
+                       
+                       if (doNextCall)
+                               nextPerField.Add(p.textStart);
+               }
+               
+               internal int[] intUptos;
+               internal int intUptoStart;
+               
+               internal void  WriteByte(int stream, byte b)
+               {
+                       int upto = intUptos[intUptoStart + stream];
+                       byte[] bytes = bytePool.buffers[upto >> DocumentsWriter.BYTE_BLOCK_SHIFT];
+                       System.Diagnostics.Debug.Assert(bytes != null);
+                       int offset = upto & DocumentsWriter.BYTE_BLOCK_MASK;
+                       if (bytes[offset] != 0)
+                       {
+                               // End of slice; allocate a new one
+                               offset = bytePool.AllocSlice(bytes, offset);
+                               bytes = bytePool.buffer;
+                               intUptos[intUptoStart + stream] = offset + bytePool.byteOffset;
+                       }
+                       bytes[offset] = b;
+                       (intUptos[intUptoStart + stream])++;
+               }
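+               // Slice growth note: a fresh slice is zero-filled except for a
+               // non-zero level marker in its last byte, so "bytes[offset] != 0"
+               // above means the slice is full; AllocSlice then chains a larger
+               // slice and returns the offset at which writing continues.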
+               
+               public void  WriteBytes(int stream, byte[] b, int offset, int len)
+               {
+                       // TODO: optimize
+                       int end = offset + len;
+                       for (int i = offset; i < end; i++)
+                               WriteByte(stream, b[i]);
+               }
+               
+               internal void  WriteVInt(int stream, int i)
+               {
+                       System.Diagnostics.Debug.Assert(stream < streamCount);
+                       while ((i & ~ 0x7F) != 0)
+                       {
+                               WriteByte(stream, (byte) ((i & 0x7f) | 0x80));
+                               i = SupportClass.Number.URShift(i, 7);
+                       }
+                       WriteByte(stream, (byte) i);
+               }
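+               // VInt sketch: 7 payload bits per byte, high bit set on every byte
+               // except the last, least-significant group first.  E.g. for i == 300:
+               //
+               //   (300 & 0x7f) | 0x80                 -> 0xAC   (written first)
+               //   SupportClass.Number.URShift(300, 7) -> 0x02   (written last)
+               //
+               // so 300 is encoded as the two bytes 0xAC 0x02.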
+               
+               internal override void  Finish()
+               {
+                       consumer.Finish();
+                       if (nextPerField != null)
+                               nextPerField.Finish();
+               }
+               
+               /// <summary>Called when the postings hash is too small for its load
+               /// (&gt; 50% occupied) or too large (&lt; 20% occupied).
+               /// </summary>
+               internal void  RehashPostings(int newSize)
+               {
+                       
+                       int newMask = newSize - 1;
+                       
+                       RawPostingList[] newHash = new RawPostingList[newSize];
+                       for (int i = 0; i < postingsHashSize; i++)
+                       {
+                               RawPostingList p0 = postingsHash[i];
+                               if (p0 != null)
+                               {
+                                       int code;
+                                       if (perThread.primary)
+                                       {
+                                               int start = p0.textStart & DocumentsWriter.CHAR_BLOCK_MASK;
+                                               char[] text = charPool.buffers[p0.textStart >> DocumentsWriter.CHAR_BLOCK_SHIFT];
+                                               int pos = start;
+                                               while (text[pos] != 0xffff)
+                                                       pos++;
+                                               code = 0;
+                                               while (pos > start)
+                                                       code = (code * 31) + text[--pos];
+                                       }
+                                       else
+                                               code = p0.textStart;
+                                       
+                                       int hashPos = code & newMask;
+                                       System.Diagnostics.Debug.Assert(hashPos >= 0);
+                                       if (newHash[hashPos] != null)
+                                       {
+                                               int inc = ((code >> 8) + code) | 1;
+                                               do 
+                                               {
+                                                       code += inc;
+                                                       hashPos = code & newMask;
+                                               }
+                                               while (newHash[hashPos] != null);
+                                       }
+                                       newHash[hashPos] = p0;
+                               }
+                       }
+                       
+                       postingsHashMask = newMask;
+                       postingsHash = newHash;
+                       postingsHashSize = newSize;
+                       postingsHashHalfSize = newSize >> 1;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHashPerThread.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Index/TermsHashPerThread.cs
new file mode 100644 (file)
index 0000000..6d90669
--- /dev/null
@@ -0,0 +1,140 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Index
+{
+       
+       sealed class TermsHashPerThread:InvertedDocConsumerPerThread
+       {
+               
+               internal TermsHash termsHash;
+               internal TermsHashConsumerPerThread consumer;
+               internal TermsHashPerThread nextPerThread;
+               
+               internal CharBlockPool charPool;
+               internal IntBlockPool intPool;
+               internal ByteBlockPool bytePool;
+               internal bool primary;
+               internal DocumentsWriter.DocState docState;
+               
+               internal RawPostingList[] freePostings = new RawPostingList[256];
+               internal int freePostingsCount;
+               
+               public TermsHashPerThread(DocInverterPerThread docInverterPerThread, TermsHash termsHash, TermsHash nextTermsHash, TermsHashPerThread primaryPerThread)
+               {
+                       docState = docInverterPerThread.docState;
+                       
+                       this.termsHash = termsHash;
+                       this.consumer = termsHash.consumer.AddThread(this);
+                       
+                       if (nextTermsHash != null)
+                       {
+                               // We are primary
+                               charPool = new CharBlockPool(termsHash.docWriter);
+                               primary = true;
+                       }
+                       else
+                       {
+                               charPool = primaryPerThread.charPool;
+                               primary = false;
+                       }
+                       
+                       intPool = new IntBlockPool(termsHash.docWriter, termsHash.trackAllocations);
+                       bytePool = new ByteBlockPool(termsHash.docWriter.byteBlockAllocator, termsHash.trackAllocations);
+                       
+                       if (nextTermsHash != null)
+                               nextPerThread = nextTermsHash.AddThread(docInverterPerThread, this);
+                       else
+                               nextPerThread = null;
+               }
+               
+               internal override InvertedDocConsumerPerField AddField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo)
+               {
+                       return new TermsHashPerField(docInverterPerField, this, nextPerThread, fieldInfo);
+               }
+               
+               public override void  Abort()
+               {
+                       lock (this)
+                       {
+                               Reset(true);
+                               consumer.Abort();
+                               if (nextPerThread != null)
+                                       nextPerThread.Abort();
+                       }
+               }
+               
+               // perField calls this when it needs more postings:
+               internal void  MorePostings()
+               {
+                       System.Diagnostics.Debug.Assert(freePostingsCount == 0);
+                       termsHash.GetPostings(freePostings);
+                       freePostingsCount = freePostings.Length;
+                       System.Diagnostics.Debug.Assert(noNullPostings(freePostings, freePostingsCount, "consumer=" + consumer));
+               }
+               
+               private static bool noNullPostings(RawPostingList[] postings, int count, System.String details)
+               {
+                       for (int i = 0; i < count; i++)
+                               System.Diagnostics.Debug.Assert(postings[i] != null, "postings[" + i + "] of " + count + " is null: " + details);
+                       return true;
+               }
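+               // noNullPostings always returns true so that it can be wrapped in the
+               // Debug.Assert above: the call is compiled away in release builds and
+               // the scan only runs when assertions are enabled.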
+               
+               public override void  StartDocument()
+               {
+                       consumer.StartDocument();
+                       if (nextPerThread != null)
+                               nextPerThread.consumer.StartDocument();
+               }
+               
+               public override DocumentsWriter.DocWriter FinishDocument()
+               {
+                       DocumentsWriter.DocWriter doc = consumer.FinishDocument();
+                       
+                       DocumentsWriter.DocWriter doc2;
+                       if (nextPerThread != null)
+                               doc2 = nextPerThread.consumer.FinishDocument();
+                       else
+                               doc2 = null;
+                       if (doc == null)
+                               return doc2;
+                       else
+                       {
+                               doc.SetNext(doc2);
+                               return doc;
+                       }
+               }
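+               // The primary and secondary consumers' per-document writers are
+               // chained via SetNext so the caller can flush both as one unit, or
+               // just the secondary one when the primary produced nothing.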
+               
+               // Clear all state
+               internal void  Reset(bool recyclePostings)
+               {
+                       intPool.Reset();
+                       bytePool.Reset();
+                       
+                       if (primary)
+                               charPool.Reset();
+                       
+                       if (recyclePostings)
+                       {
+                               termsHash.RecyclePostings(freePostings, freePostingsCount);
+                               freePostingsCount = 0;
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/LZOCompressor.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/LZOCompressor.cs
new file mode 100644 (file)
index 0000000..da0d715
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+*/
+
+// LZO.Net
+// $Id: LZOCompressor.cs,v 1.1 2004/02/22 17:44:04 laptop Exp $
+
+namespace Simplicit.Net.Lzo {
+       using System;
+       using System.Diagnostics;
+       using System.Runtime.InteropServices;
+
+       /// <summary>
+       /// Wrapper class for the high-performance LZO compression library.
+       /// </summary>
+       public class LZOCompressor {
+               private static TraceSwitch _traceSwitch = new TraceSwitch("Simplicit.Net.Lzo", "Switch for tracing of the LZOCompressor-Class");
+
+               #region Dll-Imports
+               [DllImport("lzo.dll")]
+               private static extern int __lzo_init3();
+               [DllImport("lzo.dll")]
+               private static extern string lzo_version_string();
+               [DllImport("lzo.dll")]
+               private static extern string lzo_version_date();
+               [DllImport("lzo.dll")]
+               private static extern int lzo1x_1_compress(
+                       byte[] src,
+                       int src_len,
+                       byte[] dst,
+                       ref int dst_len,
+                       byte[] wrkmem
+                       );
+               [DllImport("lzo.dll")]
+               private static extern int lzo1x_decompress(
+                       byte[] src,
+                       int src_len,
+                       byte[] dst,
+                       ref int dst_len,
+                       byte[] wrkmem);
+               #endregion
+               
+               private byte[] _workMemory = new byte[16384L * 4];
+
+               static LZOCompressor() {
+                       int init = __lzo_init3();
+                       if(init != 0) {
+                               throw new Exception("Initialization of the LZO compressor failed!");
+                       }
+               }
+
+               /// <summary>
+               /// Constructor.
+               /// </summary>
+               public LZOCompressor() {
+               }
+
+               /// <summary>
+               /// Version string of the compression library.
+               /// </summary>
+               public string Version {
+                       get {
+                               return lzo_version_string();
+                       }
+               }
+
+               /// <summary>
+               /// Version date of the compression library.
+               /// </summary>
+               public string VersionDate {
+                       get {
+                               return lzo_version_date();
+                       }
+               }
+
+               /// <summary>
+               /// Compresses a byte array and returns the compressed data in a new
+               /// array. The original length is appended to the result so that
+               /// Decompress can restore the data without further bookkeeping.
+               /// </summary>
+               /// <param name="src">Source array for compression</param>
+               /// <returns>Byte array containing the compressed data</returns>
+               public byte[] Compress(byte[] src) {
+                       if(_traceSwitch.TraceVerbose) {
+                               Trace.WriteLine(String.Format("LZOCompressor: trying to compress {0}", src.Length));
+                       }
+                       byte[] dst = new byte[src.Length + src.Length / 64 + 16 + 3 + 4];
+                       int outlen = 0;
+                       lzo1x_1_compress(src, src.Length, dst, ref outlen, _workMemory);
+                       if(_traceSwitch.TraceVerbose) {
+                               Trace.WriteLine(String.Format("LZOCompressor: compressed {0} to {1} bytes", src.Length, outlen));
+                       }
+                       byte[] ret = new byte[outlen + 4];
+                       Array.Copy(dst, 0, ret, 0, outlen);
+                       byte[] outlenarr = BitConverter.GetBytes(src.Length);
+                       Array.Copy(outlenarr, 0, ret, outlen, 4);
+                       return ret;
+               }
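+               // Round-trip sketch (illustrative only; requires a native lzo.dll on
+               // the library search path):
+               //
+               //   var lzo = new LZOCompressor();
+               //   byte[] packed   = lzo.Compress(data);     // payload + 4-byte length
+               //   byte[] restored = lzo.Decompress(packed); // reads trailing length
+               //
+               // The original length is appended in host byte order (BitConverter)
+               // so Decompress can size its output buffer without a side channel.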
+
+               /// <summary>
+               /// Decompresses compressed data to its original state.
+               /// </summary>
+               /// <param name="src">Source array to be decompressed</param>
+               /// <returns>Decompressed data</returns>
+               public byte[] Decompress(byte[] src) {
+                       if(_traceSwitch.TraceVerbose) {
+                               Trace.WriteLine(String.Format("LZOCompressor: trying to decompress {0}", src.Length));
+                       }
+                       int origlen = BitConverter.ToInt32(src, src.Length - 4);
+                       byte[] dst = new byte[origlen];
+                       int outlen = origlen;
+                       lzo1x_decompress(src, src.Length - 4, dst, ref outlen, _workMemory);
+                       if(_traceSwitch.TraceVerbose) {
+                               Trace.WriteLine(String.Format("LZOCompressor: decompressed {0} to {1} bytes", src.Length, origlen));
+                       }
+                       return dst;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.Search.RemoteSearchable.config b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.Search.RemoteSearchable.config
new file mode 100644 (file)
index 0000000..aeebfe5
--- /dev/null
@@ -0,0 +1,6 @@
+<?xml version='1.0'?>\r<!--\r\r
+ Licensed to the Apache Software Foundation (ASF) under one\r or more contributor license agreements.  See the NOTICE file\r distributed with this work for additional information\r regarding copyright ownership.  The ASF licenses this file\r to you under the Apache License, Version 2.0 (the\r "License"); you may not use this file except in compliance\r with the License.  You may obtain a copy of the License at\r\r
+   http://www.apache.org/licenses/LICENSE-2.0\r\r
+ Unless required by applicable law or agreed to in writing,\r software distributed under the License is distributed on an\r "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r KIND, either express or implied.  See the License for the\r specific language governing permissions and limitations\r under the License.\r\r
+-->\r\r
+<configuration>\r\r    <System.Runtime.Remoting>\r\r        <customErrors mode = "off" />\r\r    </System.Runtime.Remoting>\r\r</configuration>\r\r
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.Search.TestSort.config b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.Search.TestSort.config
new file mode 100644 (file)
index 0000000..aeebfe5
--- /dev/null
@@ -0,0 +1,6 @@
+<?xml version='1.0'?>\r<!--\r\r
+ Licensed to the Apache Software Foundation (ASF) under one\r or more contributor license agreements.  See the NOTICE file\r distributed with this work for additional information\r regarding copyright ownership.  The ASF licenses this file\r to you under the Apache License, Version 2.0 (the\r "License"); you may not use this file except in compliance\r with the License.  You may obtain a copy of the License at\r\r
+   http://www.apache.org/licenses/LICENSE-2.0\r\r
+ Unless required by applicable law or agreed to in writing,\r software distributed under the License is distributed on an\r "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r KIND, either express or implied.  See the License for the\r specific language governing permissions and limitations\r under the License.\r\r
+-->\r\r
+<configuration>\r\r    <System.Runtime.Remoting>\r\r        <customErrors mode = "off" />\r\r    </System.Runtime.Remoting>\r\r</configuration>\r\r
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.csproj b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.csproj
new file mode 100644 (file)
index 0000000..66fdf4f
--- /dev/null
@@ -0,0 +1,989 @@
+<?xml version="1.0" encoding="utf-8"?>\r
+<!--\r
+\r
+ Licensed to the Apache Software Foundation (ASF) under one\r
+ or more contributor license agreements.  See the NOTICE file\r
+ distributed with this work for additional information\r
+ regarding copyright ownership.  The ASF licenses this file\r
+ to you under the Apache License, Version 2.0 (the\r
+ "License"); you may not use this file except in compliance\r
+ with the License.  You may obtain a copy of the License at\r
+\r
+   http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing,\r
+ software distributed under the License is distributed on an\r
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r
+ KIND, either express or implied.  See the License for the\r
+ specific language governing permissions and limitations\r
+ under the License.\r
+\r
+-->\r
+\r
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003" ToolsVersion="4.0">\r
+  <PropertyGroup>\r
+    <ProjectType>Local</ProjectType>\r
+    <ProductVersion>8.0.50727</ProductVersion>\r
+    <SchemaVersion>2.0</SchemaVersion>\r
+    <ProjectGuid>{5D4AD9BE-1FFB-41AB-9943-25737971BF57}</ProjectGuid>\r
+    <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>\r
+    <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>\r
+    <ApplicationIcon>\r
+    </ApplicationIcon>\r
+    <AssemblyKeyContainerName>\r
+    </AssemblyKeyContainerName>\r
+    <AssemblyName>Lucene.Net</AssemblyName>\r
+    <AssemblyOriginatorKeyFile>Lucene.Net.snk</AssemblyOriginatorKeyFile>\r
+    <DefaultClientScript>JScript</DefaultClientScript>\r
+    <DefaultHTMLPageLayout>Grid</DefaultHTMLPageLayout>\r
+    <DefaultTargetSchema>IE50</DefaultTargetSchema>\r
+    <DelaySign>false</DelaySign>\r
+    <OutputType>Library</OutputType>\r
+    <RootNamespace>Lucene.Net</RootNamespace>\r
+    <RunPostBuildEvent>OnBuildSuccess</RunPostBuildEvent>\r
+    <StartupObject>\r
+    </StartupObject>\r
+    <FileUpgradeFlags>\r
+    </FileUpgradeFlags>\r
+    <UpgradeBackupLocation>\r
+    </UpgradeBackupLocation>\r
+    <TargetFrameworkVersion>v4.0</TargetFrameworkVersion>\r
+    <OldToolsVersion>2.0</OldToolsVersion>\r
+    <PublishUrl>publish\</PublishUrl>\r
+    <Install>true</Install>\r
+    <InstallFrom>Disk</InstallFrom>\r
+    <UpdateEnabled>false</UpdateEnabled>\r
+    <UpdateMode>Foreground</UpdateMode>\r
+    <UpdateInterval>7</UpdateInterval>\r
+    <UpdateIntervalUnits>Days</UpdateIntervalUnits>\r
+    <UpdatePeriodically>false</UpdatePeriodically>\r
+    <UpdateRequired>false</UpdateRequired>\r
+    <MapFileExtensions>true</MapFileExtensions>\r
+    <ApplicationRevision>0</ApplicationRevision>\r
+    <ApplicationVersion>1.0.0.%2a</ApplicationVersion>\r
+    <IsWebBootstrapper>false</IsWebBootstrapper>\r
+    <UseApplicationTrust>false</UseApplicationTrust>\r
+    <BootstrapperEnabled>true</BootstrapperEnabled>\r
+    <TargetFrameworkProfile />\r
+  </PropertyGroup>\r
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">\r
+    <OutputPath>..\..\build\bin\core\Debug\</OutputPath>\r
+    <AllowUnsafeBlocks>false</AllowUnsafeBlocks>\r
+    <BaseAddress>285212672</BaseAddress>\r
+    <CheckForOverflowUnderflow>false</CheckForOverflowUnderflow>\r
+    <ConfigurationOverrideFile>\r
+    </ConfigurationOverrideFile>\r
+    <DefineConstants>TRACE;DEBUG</DefineConstants>\r
+    <DocumentationFile>\r
+    </DocumentationFile>\r
+    <DebugSymbols>true</DebugSymbols>\r
+    <FileAlignment>4096</FileAlignment>\r
+    <NoStdLib>false</NoStdLib>\r
+    <NoWarn>618</NoWarn>\r
+    <Optimize>false</Optimize>\r
+    <RegisterForComInterop>false</RegisterForComInterop>\r
+    <RemoveIntegerChecks>false</RemoveIntegerChecks>\r
+    <TreatWarningsAsErrors>false</TreatWarningsAsErrors>\r
+    <WarningLevel>4</WarningLevel>\r
+    <DebugType>full</DebugType>\r
+    <ErrorReport>prompt</ErrorReport>\r
+  </PropertyGroup>\r
+  <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">\r
+    <OutputPath>..\..\build\bin\core\Release\</OutputPath>\r
+    <AllowUnsafeBlocks>true</AllowUnsafeBlocks>\r
+    <BaseAddress>285212672</BaseAddress>\r
+    <CheckForOverflowUnderflow>false</CheckForOverflowUnderflow>\r
+    <ConfigurationOverrideFile>\r
+    </ConfigurationOverrideFile>\r
+    <DefineConstants>TRACE</DefineConstants>\r
+    <DocumentationFile>..\..\build\bin\core\Release\Lucene.Net.XML</DocumentationFile>\r
+    <FileAlignment>4096</FileAlignment>\r
+    <NoStdLib>false</NoStdLib>\r
+    <NoWarn>618</NoWarn>\r
+    <Optimize>true</Optimize>\r
+    <RegisterForComInterop>false</RegisterForComInterop>\r
+    <RemoveIntegerChecks>false</RemoveIntegerChecks>\r
+    <TreatWarningsAsErrors>false</TreatWarningsAsErrors>\r
+    <WarningLevel>4</WarningLevel>\r
+    <DebugType>none</DebugType>\r
+    <ErrorReport>prompt</ErrorReport>\r
+  </PropertyGroup>\r
+  <PropertyGroup>\r
+    <SignAssembly>true</SignAssembly>\r
+  </PropertyGroup>\r
+  <ItemGroup>\r
+    <Reference Include="System">\r
+      <Name>System</Name>\r
+    </Reference>\r
+    <Reference Include="System.configuration" />\r
+    <Reference Include="System.Data">\r
+      <Name>System.Data</Name>\r
+    </Reference>\r
+    <Reference Include="System.Runtime.Remoting">\r
+      <Name>System.Runtime.Remoting</Name>\r
+    </Reference>\r
+    <Reference Include="System.Windows.Forms" />\r
+    <Reference Include="System.Xml">\r
+      <Name>System.XML</Name>\r
+    </Reference>\r
+  </ItemGroup>\r
+  <ItemGroup>\r
+    <Compile Include="Analysis\Analyzer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\ASCIIFoldingFilter.cs" />\r
+    <Compile Include="Analysis\BaseCharFilter.cs" />\r
+    <Compile Include="Analysis\CachingTokenFilter.cs" />\r
+    <Compile Include="Analysis\CharacterCache.cs" />\r
+    <Compile Include="Analysis\CharArraySet.cs" />\r
+    <Compile Include="Analysis\CharFilter.cs" />\r
+    <Compile Include="Analysis\CharReader.cs" />\r
+    <Compile Include="Analysis\CharStream.cs" />\r
+    <Compile Include="Analysis\CharTokenizer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\ISOLatin1AccentFilter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\KeywordAnalyzer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\KeywordTokenizer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\LengthFilter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\LetterTokenizer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\LowerCaseFilter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\LowerCaseTokenizer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\MappingCharFilter.cs" />\r
+    <Compile Include="Analysis\NormalizeCharMap.cs" />\r
+    <Compile Include="Analysis\NumericTokenStream.cs" />\r
+    <Compile Include="Analysis\PerFieldAnalyzerWrapper.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\PorterStemFilter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\PorterStemmer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\SimpleAnalyzer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\SinkTokenizer.cs" />\r
+    <Compile Include="Analysis\Standard\StandardAnalyzer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\Standard\StandardFilter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\Standard\StandardTokenizer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\Standard\StandardTokenizerImpl.cs" />\r
+    <Compile Include="Analysis\StopAnalyzer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\StopFilter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\TeeSinkTokenFilter.cs" />\r
+    <Compile Include="Analysis\TeeTokenFilter.cs" />\r
+    <Compile Include="Analysis\Token.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\Tokenattributes\FlagsAttribute.cs" />\r
+    <Compile Include="Analysis\Tokenattributes\FlagsAttributeImpl.cs" />\r
+    <Compile Include="Analysis\Tokenattributes\OffsetAttribute.cs" />\r
+    <Compile Include="Analysis\Tokenattributes\OffsetAttributeImpl.cs" />\r
+    <Compile Include="Analysis\Tokenattributes\PayloadAttribute.cs" />\r
+    <Compile Include="Analysis\Tokenattributes\PayloadAttributeImpl.cs" />\r
+    <Compile Include="Analysis\Tokenattributes\PositionIncrementAttribute.cs" />\r
+    <Compile Include="Analysis\Tokenattributes\PositionIncrementAttributeImpl.cs" />\r
+    <Compile Include="Analysis\Tokenattributes\TermAttribute.cs" />\r
+    <Compile Include="Analysis\Tokenattributes\TermAttributeImpl.cs" />\r
+    <Compile Include="Analysis\Tokenattributes\TypeAttribute.cs" />\r
+    <Compile Include="Analysis\Tokenattributes\TypeAttributeImpl.cs" />\r
+    <Compile Include="Analysis\TokenFilter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\Tokenizer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\TokenStream.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\TokenWrapper.cs" />\r
+    <Compile Include="Analysis\WhitespaceAnalyzer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\WhitespaceTokenizer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Analysis\WordlistLoader.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="AssemblyInfo.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Document\AbstractField.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Document\CompressionTools.cs" />\r
+    <Compile Include="Document\DateField.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Document\DateTools.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Document\Document.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Document\Field.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Document\Fieldable.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Document\FieldSelector.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Document\FieldSelectorResult.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Document\LoadFirstFieldSelector.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Document\MapFieldSelector.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Document\NumberTools.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Document\NumericField.cs" />\r
+    <Compile Include="Document\SetBasedFieldSelector.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\AbstractAllTermDocs.cs" />\r
+    <Compile Include="Index\AllTermDocs.cs" />\r
+    <Compile Include="Index\BufferedDeletes.cs" />\r
+    <Compile Include="Index\ByteBlockPool.cs" />\r
+    <Compile Include="Index\ByteSliceReader.cs" />\r
+    <Compile Include="Index\ByteSliceWriter.cs" />\r
+    <Compile Include="Index\CharBlockPool.cs" />\r
+    <Compile Include="Index\CheckIndex.cs" />\r
+    <Compile Include="Index\CompoundFileReader.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\CompoundFileWriter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\ConcurrentMergeScheduler.cs" />\r
+    <Compile Include="Index\CorruptIndexException.cs" />\r
+    <Compile Include="Index\DefaultSkipListReader.cs" />\r
+    <Compile Include="Index\DefaultSkipListWriter.cs" />\r
+    <Compile Include="Index\DirectoryOwningReader.cs" />\r
+    <Compile Include="Index\DirectoryReader.cs" />\r
+    <Compile Include="Index\DocConsumer.cs" />\r
+    <Compile Include="Index\DocConsumerPerThread.cs" />\r
+    <Compile Include="Index\DocFieldConsumer.cs" />\r
+    <Compile Include="Index\DocFieldConsumerPerField.cs" />\r
+    <Compile Include="Index\DocFieldConsumerPerThread.cs" />\r
+    <Compile Include="Index\DocFieldConsumers.cs" />\r
+    <Compile Include="Index\DocFieldConsumersPerField.cs" />\r
+    <Compile Include="Index\DocFieldConsumersPerThread.cs" />\r
+    <Compile Include="Index\DocFieldProcessor.cs" />\r
+    <Compile Include="Index\DocFieldProcessorPerField.cs" />\r
+    <Compile Include="Index\DocFieldProcessorPerThread.cs" />\r
+    <Compile Include="Index\DocInverter.cs" />\r
+    <Compile Include="Index\DocInverterPerField.cs" />\r
+    <Compile Include="Index\DocInverterPerThread.cs" />\r
+    <Compile Include="Index\DocumentsWriter.cs" />\r
+    <Compile Include="Index\DocumentsWriterThreadState.cs" />\r
+    <Compile Include="Index\FieldInfo.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\FieldInfos.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\FieldInvertState.cs" />\r
+    <Compile Include="Index\FieldReaderException.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\FieldSortedTermVectorMapper.cs" />\r
+    <Compile Include="Index\FieldsReader.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\FieldsWriter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\FilterIndexReader.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\FormatPostingsDocsConsumer.cs" />\r
+    <Compile Include="Index\FormatPostingsDocsWriter.cs" />\r
+    <Compile Include="Index\FormatPostingsFieldsConsumer.cs" />\r
+    <Compile Include="Index\FormatPostingsFieldsWriter.cs" />\r
+    <Compile Include="Index\FormatPostingsPositionsConsumer.cs" />\r
+    <Compile Include="Index\FormatPostingsPositionsWriter.cs" />\r
+    <Compile Include="Index\FormatPostingsTermsConsumer.cs" />\r
+    <Compile Include="Index\FormatPostingsTermsWriter.cs" />\r
+    <Compile Include="Index\FreqProxFieldMergeState.cs" />\r
+    <Compile Include="Index\FreqProxTermsWriter.cs" />\r
+    <Compile Include="Index\FreqProxTermsWriterPerField.cs" />\r
+    <Compile Include="Index\FreqProxTermsWriterPerThread.cs" />\r
+    <Compile Include="Index\IndexCommit.cs" />\r
+    <Compile Include="Index\IndexCommitPoint.cs" />\r
+    <Compile Include="Index\IndexDeletionPolicy.cs" />\r
+    <Compile Include="Index\IndexFileDeleter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\IndexFileNameFilter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\IndexFileNames.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\IndexModifier.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\IndexReader.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\IndexWriter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\IntBlockPool.cs" />\r
+    <Compile Include="Index\InvertedDocConsumer.cs" />\r
+    <Compile Include="Index\InvertedDocConsumerPerField.cs" />\r
+    <Compile Include="Index\InvertedDocConsumerPerThread.cs" />\r
+    <Compile Include="Index\InvertedDocEndConsumer.cs" />\r
+    <Compile Include="Index\InvertedDocEndConsumerPerField.cs" />\r
+    <Compile Include="Index\InvertedDocEndConsumerPerThread.cs" />\r
+    <Compile Include="Index\KeepOnlyLastCommitDeletionPolicy.cs" />\r
+    <Compile Include="Index\LogByteSizeMergePolicy.cs" />\r
+    <Compile Include="Index\LogDocMergePolicy.cs" />\r
+    <Compile Include="Index\LogMergePolicy.cs" />\r
+    <Compile Include="Index\MergeDocIDRemapper.cs" />\r
+    <Compile Include="Index\MergePolicy.cs" />\r
+    <Compile Include="Index\MergeScheduler.cs" />\r
+    <Compile Include="Index\MultiLevelSkipListReader.cs" />\r
+    <Compile Include="Index\MultiLevelSkipListWriter.cs" />\r
+    <Compile Include="Index\MultipleTermPositions.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\MultiReader.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\NormsWriter.cs" />\r
+    <Compile Include="Index\NormsWriterPerField.cs" />\r
+    <Compile Include="Index\NormsWriterPerThread.cs" />\r
+    <Compile Include="Index\ParallelReader.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\Payload.cs" />\r
+    <Compile Include="Index\PositionBasedTermVectorMapper.cs" />\r
+    <Compile Include="Index\RawPostingList.cs" />\r
+    <Compile Include="Index\ReadOnlyDirectoryReader.cs" />\r
+    <Compile Include="Index\ReadOnlySegmentReader.cs" />\r
+    <Compile Include="Index\ReusableStringReader.cs" />\r
+    <Compile Include="Index\SegmentInfo.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\SegmentInfos.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\SegmentMergeInfo.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\SegmentMergeQueue.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\SegmentMerger.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\SegmentReader.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\SegmentTermDocs.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\SegmentTermEnum.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\SegmentTermPositions.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\SegmentTermPositionVector.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\SegmentTermVector.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\SegmentWriteState.cs" />\r
+    <Compile Include="Index\SerialMergeScheduler.cs" />\r
+    <Compile Include="Index\SnapshotDeletionPolicy.cs" />\r
+    <Compile Include="Index\SortedTermVectorMapper.cs" />\r
+    <Compile Include="Index\StaleReaderException.cs" />\r
+    <Compile Include="Index\StoredFieldsWriter.cs" />\r
+    <Compile Include="Index\StoredFieldsWriterPerThread.cs" />\r
+    <Compile Include="Index\Term.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\TermBuffer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\TermDocs.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\TermEnum.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\TermFreqVector.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\TermInfo.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\TermInfosReader.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\TermInfosWriter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\TermPositions.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\TermPositionVector.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\TermsHash.cs" />\r
+    <Compile Include="Index\TermsHashConsumer.cs" />\r
+    <Compile Include="Index\TermsHashConsumerPerField.cs" />\r
+    <Compile Include="Index\TermsHashConsumerPerThread.cs" />\r
+    <Compile Include="Index\TermsHashPerField.cs" />\r
+    <Compile Include="Index\TermsHashPerThread.cs" />\r
+    <Compile Include="Index\TermVectorEntry.cs" />\r
+    <Compile Include="Index\TermVectorEntryFreqSortedComparator.cs" />\r
+    <Compile Include="Index\TermVectorMapper.cs" />\r
+    <Compile Include="Index\TermVectorOffsetInfo.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\TermVectorsReader.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Index\TermVectorsTermsWriter.cs" />\r
+    <Compile Include="Index\TermVectorsTermsWriterPerField.cs" />\r
+    <Compile Include="Index\TermVectorsTermsWriterPerThread.cs" />\r
+    <Compile Include="Index\TermVectorsWriter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="LucenePackage.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Messages\Message.cs" />\r
+    <Compile Include="Messages\MessageImpl.cs" />\r
+    <Compile Include="Messages\NLS.cs" />\r
+    <Compile Include="Messages\NLSException.cs" />\r
+    <Compile Include="QueryParser\CharStream.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="QueryParser\FastCharStream.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="QueryParser\MultiFieldQueryParser.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="QueryParser\ParseException.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="QueryParser\QueryParser.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="QueryParser\QueryParserConstants.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="QueryParser\QueryParserTokenManager.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="QueryParser\Token.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="QueryParser\TokenMgrError.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\BooleanClause.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\BooleanQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\BooleanScorer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\BooleanScorer2.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\CachingSpanFilter.cs" />\r
+    <Compile Include="Search\CachingWrapperFilter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Collector.cs" />\r
+    <Compile Include="Search\ComplexExplanation.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\ConjunctionScorer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\ConstantScoreQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\ConstantScoreRangeQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\DefaultSimilarity.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\DisjunctionMaxQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\DisjunctionMaxScorer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\DisjunctionSumScorer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\DocIdSet.cs" />\r
+    <Compile Include="Search\DocIdSetIterator.cs" />\r
+    <Compile Include="Search\ExactPhraseScorer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Explanation.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\ExtendedFieldCache.cs" />\r
+    <Compile Include="Search\FieldCache.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\FieldCacheImpl.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\FieldCacheRangeFilter.cs" />\r
+    <Compile Include="Search\FieldCacheTermsFilter.cs" />\r
+    <Compile Include="Search\FieldComparator.cs" />\r
+    <Compile Include="Search\FieldComparatorSource.cs" />\r
+    <Compile Include="Search\FieldDoc.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\FieldDocSortedHitQueue.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\FieldSortedHitQueue.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\FieldValueHitQueue.cs" />\r
+    <Compile Include="Search\Filter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\FilteredDocIdSet.cs" />\r
+    <Compile Include="Search\FilteredDocIdSetIterator.cs" />\r
+    <Compile Include="Search\FilteredQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\FilteredTermEnum.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\FilterManager.cs" />\r
+    <Compile Include="Search\Function\ByteFieldSource.cs" />\r
+    <Compile Include="Search\Function\CustomScoreProvider.cs" />\r
+    <Compile Include="Search\Function\CustomScoreQuery.cs" />\r
+    <Compile Include="Search\Function\DocValues.cs" />\r
+    <Compile Include="Search\Function\FieldCacheSource.cs" />\r
+    <Compile Include="Search\Function\FieldScoreQuery.cs" />\r
+    <Compile Include="Search\Function\FloatFieldSource.cs" />\r
+    <Compile Include="Search\Function\IntFieldSource.cs" />\r
+    <Compile Include="Search\Function\MultiValueSource.cs" />\r
+    <Compile Include="Search\Function\OrdFieldSource.cs" />\r
+    <Compile Include="Search\Function\ReverseOrdFieldSource.cs" />\r
+    <Compile Include="Search\Function\ShortFieldSource.cs" />\r
+    <Compile Include="Search\Function\ValueSource.cs" />\r
+    <Compile Include="Search\Function\ValueSourceQuery.cs" />\r
+    <Compile Include="Search\FuzzyQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\FuzzyTermEnum.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Hit.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\HitCollector.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\HitCollectorWrapper.cs" />\r
+    <Compile Include="Search\HitIterator.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\HitQueue.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Hits.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\IndexSearcher.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\MatchAllDocsQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\MultiPhraseQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\MultiSearcher.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\MultiTermQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\MultiTermQueryWrapperFilter.cs" />\r
+    <Compile Include="Search\NumericRangeFilter.cs" />\r
+    <Compile Include="Search\NumericRangeQuery.cs" />\r
+    <Compile Include="Search\ParallelMultiSearcher.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Payloads\AveragePayloadFunction.cs" />\r
+    <Compile Include="Search\Payloads\BoostingTermQuery.cs" />\r
+    <Compile Include="Search\Payloads\MaxPayloadFunction.cs" />\r
+    <Compile Include="Search\Payloads\MinPayloadFunction.cs" />\r
+    <Compile Include="Search\Payloads\PayloadFunction.cs" />\r
+    <Compile Include="Search\Payloads\PayloadNearQuery.cs" />\r
+    <Compile Include="Search\Payloads\PayloadSpanUtil.cs" />\r
+    <Compile Include="Search\Payloads\PayloadTermQuery.cs" />\r
+    <Compile Include="Search\PhrasePositions.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\PhraseQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\PhraseQueue.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\PhraseScorer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\PositiveScoresOnlyCollector.cs" />\r
+    <Compile Include="Search\PrefixFilter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\PrefixQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\PrefixTermEnum.cs" />\r
+    <Compile Include="Search\Query.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\QueryFilter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\QueryTermVector.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\QueryWrapperFilter.cs" />\r
+    <Compile Include="Search\RangeFilter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\RangeQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\ReqExclScorer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\ReqOptSumScorer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\ScoreCachingWrappingScorer.cs" />\r
+    <Compile Include="Search\ScoreDoc.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\ScoreDocComparator.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Scorer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Searchable.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Searcher.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Similarity.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\SimilarityDelegator.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\SloppyPhraseScorer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Sort.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\SortComparator.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\SortComparatorSource.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\SortField.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\SpanFilter.cs" />\r
+    <Compile Include="Search\SpanFilterResult.cs" />\r
+    <Compile Include="Search\SpanQueryFilter.cs" />\r
+    <Compile Include="Search\Spans\FieldMaskingSpanQuery.cs" />\r
+    <Compile Include="Search\Spans\NearSpansOrdered.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Spans\NearSpansUnordered.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Spans\SpanFirstQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Spans\SpanNearQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Spans\SpanNotQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Spans\SpanOrQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Spans\SpanQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Spans\Spans.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Spans\SpanScorer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Spans\SpanTermQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Spans\SpanWeight.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\Spans\TermSpans.cs" />\r
+    <Compile Include="Search\TermQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\TermRangeFilter.cs" />\r
+    <Compile Include="Search\TermRangeQuery.cs" />\r
+    <Compile Include="Search\TermRangeTermEnum.cs" />\r
+    <Compile Include="Search\TermScorer.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\TimeLimitedCollector.cs" />\r
+    <Compile Include="Search\TimeLimitingCollector.cs" />\r
+    <Compile Include="Search\TopDocCollector.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\TopDocs.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\TopDocsCollector.cs" />\r
+    <Compile Include="Search\TopFieldCollector.cs" />\r
+    <Compile Include="Search\TopFieldDocCollector.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\TopFieldDocs.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\TopScoreDocCollector.cs" />\r
+    <Compile Include="Search\Weight.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\WildcardQuery.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Search\WildcardTermEnum.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\AlreadyClosedException.cs" />\r
+    <Compile Include="Store\BufferedIndexInput.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\BufferedIndexOutput.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\CheckSumIndexInput.cs" />\r
+    <Compile Include="Store\CheckSumIndexOutput.cs" />\r
+    <Compile Include="Store\Directory.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\FileSwitchDirectory.cs" />\r
+    <Compile Include="Store\FSDirectory.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\FSLockFactory.cs" />\r
+    <Compile Include="Store\IndexInput.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\IndexOutput.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\Lock.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\LockFactory.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\LockObtainFailedException.cs" />\r
+    <Compile Include="Store\LockReleaseFailedException.cs" />\r
+    <Compile Include="Store\LockStressTest.cs" />\r
+    <Compile Include="Store\LockVerifyServer.cs" />\r
+    <Compile Include="Store\MMapDirectory.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\NativeFSLockFactory.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\NIOFSDirectory.cs" />\r
+    <Compile Include="Store\NoLockFactory.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\NoSuchDirectoryException.cs" />\r
+    <Compile Include="Store\RAMDirectory.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\RAMFile.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\RAMInputStream.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\RAMOutputStream.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\SimpleFSDirectory.cs" />\r
+    <Compile Include="Store\SimpleFSLockFactory.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\SingleInstanceLockFactory.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Store\VerifyingLockFactory.cs" />\r
+    <Compile Include="SupportClass.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Util\ArrayUtil.cs" />\r
+    <Compile Include="Util\Attribute.cs" />\r
+    <Compile Include="Util\AttributeImpl.cs" />\r
+    <Compile Include="Util\AttributeSource.cs" />\r
+    <Compile Include="Util\AverageGuessMemoryModel.cs" />\r
+    <Compile Include="Util\BitUtil.cs" />\r
+    <Compile Include="Util\BitVector.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Util\CloseableThreadLocal.cs" />\r
+    <Compile Include="Util\Constants.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Util\DocIdBitSet.cs" />\r
+    <Compile Include="Util\FieldCacheSanityChecker.cs" />\r
+    <Compile Include="Util\IndexableBinaryStringTools.cs" />\r
+    <Compile Include="Util\MapOfSets.cs" />\r
+    <Compile Include="Util\MemoryModel.cs" />\r
+    <Compile Include="Util\NumericUtils.cs" />\r
+    <Compile Include="Util\OpenBitSet.cs" />\r
+    <Compile Include="Util\OpenBitSetDISI.cs" />\r
+    <Compile Include="Util\OpenBitSetIterator.cs" />\r
+    <Compile Include="Util\Parameter.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Util\PriorityQueue.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Util\Cache\Cache.cs" />\r
+    <Compile Include="Util\Cache\SimpleLRUCache.cs" />\r
+    <Compile Include="Util\Cache\SimpleMapCache.cs" />\r
+    <Compile Include="Util\RamUsageEstimator.cs" />\r
+    <Compile Include="Util\ReaderUtil.cs" />\r
+    <Compile Include="Util\ScorerDocQueue.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Util\SimpleStringInterner.cs" />\r
+    <Compile Include="Util\SmallFloat.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Util\SortedVIntList.cs" />\r
+    <Compile Include="Util\SorterTemplate.cs" />\r
+    <Compile Include="Util\StringHelper.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Util\StringInterner.cs" />\r
+    <Compile Include="Util\ToStringUtils.cs">\r
+      <SubType>Code</SubType>\r
+    </Compile>\r
+    <Compile Include="Util\UnicodeUtil.cs" />\r
+    <Compile Include="Util\Version.cs" />\r
+    <None Include="Analysis\Standard\StandardTokenizerImpl.jflex" />\r
+    <None Include="Lucene.Net.Search.RemoteSearchable.config" />\r
+    <None Include="Lucene.Net.Search.TestSort.config" />\r
+    <None Include="Lucene.Net.snk" />\r
+    <None Include="QueryParser\QueryParser.jj" />\r
+    <Content Include="Analysis\Package.html" />\r
+    <Content Include="Analysis\Standard\Package.html" />\r
+    <Content Include="Document\Package.html" />\r
+    <Content Include="Index\Package.html" />\r
+    <Content Include="Messages\Package.html" />\r
+    <Content Include="Overview.html" />\r
+    <Content Include="Package.html" />\r
+    <Content Include="QueryParser\Package.html" />\r
+    <Content Include="Search\Function\Package.html" />\r
+    <Content Include="Search\Package.html" />\r
+    <Content Include="Search\Payloads\Package.html" />\r
+    <Content Include="Search\Spans\Package.html" />\r
+    <Content Include="Store\Package.html" />\r
+    <Content Include="Util\Package.html" />\r
+  </ItemGroup>\r
+  <ItemGroup>\r
+    <BootstrapperPackage Include=".NETFramework,Version=v4.0">\r
+      <Visible>False</Visible>\r
+      <ProductName>Microsoft .NET Framework 4 %28x86 and x64%29</ProductName>\r
+      <Install>true</Install>\r
+    </BootstrapperPackage>\r
+    <BootstrapperPackage Include="Microsoft.Net.Client.3.5">\r
+      <Visible>False</Visible>\r
+      <ProductName>.NET Framework 3.5 SP1 Client Profile</ProductName>\r
+      <Install>false</Install>\r
+    </BootstrapperPackage>\r
+    <BootstrapperPackage Include="Microsoft.Net.Framework.3.5.SP1">\r
+      <Visible>False</Visible>\r
+      <ProductName>.NET Framework 3.5 SP1</ProductName>\r
+      <Install>false</Install>\r
+    </BootstrapperPackage>\r
+    <BootstrapperPackage Include="Microsoft.Windows.Installer.3.1">\r
+      <Visible>False</Visible>\r
+      <ProductName>Windows Installer 3.1</ProductName>\r
+      <Install>true</Install>\r
+    </BootstrapperPackage>\r
+  </ItemGroup>\r
+  <Import Project="$(MSBuildBinPath)\Microsoft.CSharp.targets" />\r
+  <PropertyGroup>\r
+    <PreBuildEvent>\r
+    </PreBuildEvent>\r
+    <PostBuildEvent>\r
+    </PostBuildEvent>\r
+  </PropertyGroup>\r
+</Project>
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.ndoc b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.ndoc
new file mode 100644 (file)
index 0000000..44f5906
--- /dev/null
@@ -0,0 +1,61 @@
+<?xml version='1.0'?>\r
+<!--\r
+\r
+ Licensed to the Apache Software Foundation (ASF) under one\r
+ or more contributor license agreements.  See the NOTICE file\r
+ distributed with this work for additional information\r
+ regarding copyright ownership.  The ASF licenses this file\r
+ to you under the Apache License, Version 2.0 (the\r
+ "License"); you may not use this file except in compliance\r
+ with the License.  You may obtain a copy of the License at\r
+\r
+   http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing,\r
+ software distributed under the License is distributed on an\r
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\r
+ KIND, either express or implied.  See the License for the\r
+ specific language governing permissions and limitations\r
+ under the License.\r
+\r
+-->\r
+\r
+<project SchemaVersion="1.3">\r
+    <assemblies>\r
+        <assembly location=".\bin\Release\Lucene.Net.dll" documentation=".\Lucene.Net.xml" />\r
+    </assemblies>\r
+    <documenters>\r
+        <documenter name="JavaDoc">\r
+            <property name="OutputDirectory" value=".\doc\" />\r
+        </documenter>\r
+        <documenter name="LaTeX">\r
+            <property name="OutputDirectory" value=".\doc\" />\r
+            <property name="TextFileFullName" value="Documentation.tex" />\r
+            <property name="TexFileBaseName" value="Documentation" />\r
+            <property name="LatexCompiler" value="latex" />\r
+            <property name="TexFileFullPath" value=".\doc\Documentation.tex" />\r
+        </documenter>\r
+        <documenter name="LinearHtml">\r
+            <property name="OutputDirectory" value=".\doc\" />\r
+            <property name="Title" value="An NDoc Documented Class Library" />\r
+        </documenter>\r
+        <documenter name="MSDN">\r
+            <property name="OutputDirectory" value=".\Docs\" />\r
+            <property name="HtmlHelpName" value="Apache Lucene.Net 2.9.0 API Documentation" />\r
+            <property name="Title" value="Apache Lucene.Net 2.9.0 Class Library API" />\r
+            <property name="AssemblyVersionInfo" value="AssemblyVersion" />\r
+        </documenter>\r
+        <documenter name="MSDN 2003">\r
+            <property name="OutputDirectory" value=".\doc\" />\r
+            <property name="Title" value="An NDoc Documented Class Library" />\r
+        </documenter>\r
+        <documenter name="VS.NET 2003">\r
+            <property name="OutputDirectory" value=".\doc\" />\r
+            <property name="HtmlHelpName" value="Documentation" />\r
+            <property name="Title" value="An NDoc documented library" />\r
+        </documenter>\r
+        <documenter name="XML">\r
+            <property name="OutputFile" value=".\doc\doc.xml" />\r
+        </documenter>\r
+    </documenters>\r
+</project>
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.snk b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.snk
new file mode 100644 (file)
index 0000000..f7f9ee5
Binary files /dev/null and b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Lucene.Net.snk differ
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/LucenePackage.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/LucenePackage.cs
new file mode 100644 (file)
index 0000000..8962f8d
--- /dev/null
@@ -0,0 +1,40 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net
+{
+       
+	/// <summary>Lucene's package information, including version.</summary>
+       public sealed class LucenePackage
+       {
+               
+               private LucenePackage()
+               {
+               } // can't construct
+               
+               /// <summary>Return Lucene's package, including version information. </summary>
+               // {{Aroush-1.9}}
+        /*
+               public static Package Get()
+               {
+                       return typeof(LucenePackage).getPackage();
+               }
+        */
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/.gitattributes b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/.gitattributes
new file mode 100644 (file)
index 0000000..f58ced4
--- /dev/null
@@ -0,0 +1,5 @@
+/Message.cs -crlf
+/MessageImpl.cs -crlf
+/NLS.cs -crlf
+/NLSException.cs -crlf
+/Package.html -crlf
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/Message.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/Message.cs
new file mode 100644 (file)
index 0000000..2c26cf1
--- /dev/null
@@ -0,0 +1,37 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Messages
+{
+       
+	/// <summary> Message interface for lazy loading of localized messages.
+	/// Part of the Native Language Support (NLS) system for software internationalization.
+	/// </summary>
+       public interface Message
+       {
+               
+               System.String GetKey();
+               
+               System.Object[] GetArguments();
+               
+               System.String GetLocalizedMessage();
+               
+               System.String GetLocalizedMessage(System.Globalization.CultureInfo locale);
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/MessageImpl.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/MessageImpl.cs
new file mode 100644 (file)
index 0000000..f29d134
--- /dev/null
@@ -0,0 +1,80 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Messages
+{
+       
+	/// <summary> Default implementation of the Message interface.
+	/// Part of the Native Language Support (NLS) system for software internationalization.
+	/// </summary>
+       [Serializable]
+       public class MessageImpl : Message
+       {
+               
+               private const long serialVersionUID = - 3077643314630884523L;
+               
+               private System.String key;
+               
+               private System.Object[] arguments = new System.Object[0];
+               
+               public MessageImpl(System.String key)
+               {
+                       this.key = key;
+               }
+               
+               public MessageImpl(System.String key, System.Object[] args):this(key)
+               {
+                       this.arguments = args;
+               }
+               
+               public virtual System.Object[] GetArguments()
+               {
+                       return this.arguments;
+               }
+               
+               public virtual System.String GetKey()
+               {
+                       return this.key;
+               }
+               
+               public virtual System.String GetLocalizedMessage()
+               {
+                       return GetLocalizedMessage(System.Threading.Thread.CurrentThread.CurrentCulture);
+               }
+               
+               public virtual System.String GetLocalizedMessage(System.Globalization.CultureInfo locale)
+               {
+                       return NLS.GetLocalizedMessage(GetKey(), locale, GetArguments());
+               }
+               
+               public override System.String ToString()
+               {
+                       System.Object[] args = GetArguments();
+                       System.String argsString = "";
+                       if (args != null)
+                       {
+                               for (int i = 0; i < args.Length; i++)
+                               {
+					// join the arguments with ", " separators
+					argsString += (args[i] + (i < args.Length - 1 ? ", " : ""));
+                               }
+                       }
+                       return GetKey() + " " + argsString;
+               }
+       }
+}
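A minimal usage sketch of the two types above (not part of the commit; the message key and argument are hypothetical, and with no resource bundle registered NLS falls back to a "message not found" string). The point of the design is that a Message reference is cheap to construct, while the bundle lookup and String.Format call are deferred until the text is actually rendered:

    using System;
    using System.Globalization;
    using Mono.Lucene.Net.Messages;

    class MessageDemo
    {
        static void Main()
        {
            // Cheap to construct: no bundle lookup or formatting happens here.
            Message invalidSyntax =
                new MessageImpl("Q0001E_INVALID_SYNTAX", new object[] { "foo:" });

            // Lookup and formatting are deferred until display time.
            Console.WriteLine(invalidSyntax.GetLocalizedMessage());
            Console.WriteLine(invalidSyntax.GetLocalizedMessage(new CultureInfo("ja-JP")));
        }
    }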
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/NLS.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/NLS.cs
new file mode 100644 (file)
index 0000000..80ba956
--- /dev/null
@@ -0,0 +1,253 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Messages
+{
+       
+	/// <summary> Message bundle classes extend this class to implement a bundle.
+	/// 
+	/// Part of the Native Language Support (NLS) system for software internationalization.
+	/// 
+	/// This class is similar to eclipse.osgi.util.NLS: the InitializeMessages()
+	/// method resets the values of all static strings and should only be called
+	/// by classes that extend NLS (see TestMessages.java for reference). It
+	/// validates every message in a bundle at class load time and performs
+	/// per-message validation at runtime (see NLSTest.java for a usage
+	/// reference).
+	/// </summary>
+       public class NLS
+       {
+               public interface IPriviligedAction
+               {
+                       /// <summary>
+			/// Performs the privileged action.
+                       /// </summary>
+                       /// <returns>A value that may represent the result of the action.</returns>
+                       System.Object Run();
+               }
+
+               private class AnonymousClassPrivilegedAction : IPriviligedAction
+               {
+                       public AnonymousClassPrivilegedAction(System.Reflection.FieldInfo field)
+                       {
+                               InitBlock(field);
+                       }
+                       private void  InitBlock(System.Reflection.FieldInfo field)
+                       {
+                               this.field = field;
+                       }
+                       private System.Reflection.FieldInfo field;
+                       public virtual System.Object Run()
+                       {
+                // field.setAccessible(true); // {{Aroush-2.9}} java.lang.reflect.AccessibleObject.setAccessible
+                               return null;
+                       }
+               }
+               
+               private static System.Collections.IDictionary bundles = new System.Collections.Hashtable(0);
+               
+               protected internal NLS()
+               {
+                       // Do not instantiate
+               }
+               
+               public static System.String GetLocalizedMessage(System.String key)
+               {
+                       return GetLocalizedMessage(key, System.Threading.Thread.CurrentThread.CurrentCulture);
+               }
+               
+               public static System.String GetLocalizedMessage(System.String key, System.Globalization.CultureInfo locale)
+               {
+                       System.Object message = GetResourceBundleObject(key, locale);
+                       if (message == null)
+                       {
+                               return "Message with key:" + key + " and locale: " + locale + " not found.";
+                       }
+                       return message.ToString();
+               }
+               
+               public static System.String GetLocalizedMessage(System.String key, System.Globalization.CultureInfo locale, System.Object[] args)
+               {
+                       System.String str = GetLocalizedMessage(key, locale);
+                       
+                       if (args.Length > 0)
+                       {
+                               str = System.String.Format(str, args);
+                       }
+                       
+                       return str;
+               }
+               
+               public static System.String GetLocalizedMessage(System.String key, System.Object[] args)
+               {
+                       return GetLocalizedMessage(key, System.Threading.Thread.CurrentThread.CurrentCulture, args);
+               }
+               
+		/// <summary> Initializes a given class with the message bundle keys. Should be called from
+		/// a class that extends NLS, in a static constructor, at class load time.
+		/// </summary>
+		/// <param name="bundleName">Property file that contains the message bundle
+		/// </param>
+		/// <param name="clazz">Class where the message constants reside
+		/// </param>
+               //@SuppressWarnings("unchecked")
+               protected internal static void  InitializeMessages(System.String bundleName, System.Type clazz)
+               {
+                       try
+                       {
+                               Load(clazz);
+                               if (!bundles.Contains(bundleName))
+                                       bundles[bundleName] = clazz;
+                       }
+                       catch (System.Exception e)
+                       {
+                               // ignore all errors and exceptions
+                               // because this function is supposed to be called at class load time.
+                       }
+               }
+               
+               private static System.Object GetResourceBundleObject(System.String messageKey, System.Globalization.CultureInfo locale)
+               {
+                       
+                       // slow resource checking
+                       // need to loop thru all registered resource bundles
+                       for (System.Collections.IEnumerator it = bundles.Keys.GetEnumerator(); it.MoveNext(); )
+                       {
+                               System.Type clazz = (System.Type) bundles[(System.String) it.Current];
+                               System.Threading.Thread.CurrentThread.CurrentUICulture = locale;
+                System.Resources.ResourceManager resourceBundle = System.Resources.ResourceManager.CreateFileBasedResourceManager(clazz.Name, "Messages", null); //{{Mono.Lucene.Net-2.9.1}} Can we make resourceDir "Messages" more general?
+                               if (resourceBundle != null)
+                               {
+                                       try
+                                       {
+                                               System.Object obj = resourceBundle.GetObject(messageKey);
+                                               if (obj != null)
+                                                       return obj;
+                                       }
+                                       catch (System.Resources.MissingManifestResourceException e)
+                                       {
+                                               // just continue it might be on the next resource bundle
+                                       }
+                               }
+                       }
+                       // if resource is not found
+                       return null;
+               }
+               
+               /// <param name="clazz">
+               /// </param>
+               private static void  Load(System.Type clazz)
+               {
+                       System.Reflection.FieldInfo[] fieldArray = clazz.GetFields(System.Reflection.BindingFlags.Instance | System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Public | System.Reflection.BindingFlags.DeclaredOnly | System.Reflection.BindingFlags.Static);
+                       
+                       bool isFieldAccessible = clazz.IsPublic;
+                       
+                       // build a map of field names to Field objects
+                       int len = fieldArray.Length;
+                       System.Collections.IDictionary fields = new System.Collections.Hashtable(len * 2);
+                       for (int i = 0; i < len; i++)
+                       {
+                               fields[fieldArray[i].Name] = fieldArray[i];
+                               LoadfieldValue(fieldArray[i], isFieldAccessible, clazz);
+                       }
+               }
+               
+               /// <param name="field">
+               /// </param>
+               /// <param name="isFieldAccessible">
+               /// </param>
+               private static void  LoadfieldValue(System.Reflection.FieldInfo field, bool isFieldAccessible, System.Type clazz)
+               {
+            /*
+                       int MOD_EXPECTED = Modifier.PUBLIC | Modifier.STATIC;
+                       int MOD_MASK = MOD_EXPECTED | Modifier.FINAL;
+                       if ((field.getModifiers() & MOD_MASK) != MOD_EXPECTED)
+                               return ;
+            */
+            // per the modifier check in the commented-out Java above, the field
+            // must be both public and static to receive a message key
+            if (!(field.IsPublic && field.IsStatic))
+                return;
+                       
+                       // Set a value for this empty field.
+                       if (!isFieldAccessible)
+                               MakeAccessible(field);
+                       try
+                       {
+                               field.SetValue(null, field.Name);
+                               ValidateMessage(field.Name, clazz);
+                       }
+                       catch (System.ArgumentException e)
+                       {
+                               // should not happen
+                       }
+                       catch (System.UnauthorizedAccessException e)
+                       {
+                               // should not happen
+                       }
+               }
+               
+               /// <param name="key">- Message Key
+               /// </param>
+               private static void  ValidateMessage(System.String key, System.Type clazz)
+               {
+                       // Test if the message is present in the resource bundle
+                       try
+                       {
+                               System.Threading.Thread.CurrentThread.CurrentUICulture = System.Threading.Thread.CurrentThread.CurrentCulture;
+                               System.Resources.ResourceManager resourceBundle = System.Resources.ResourceManager.CreateFileBasedResourceManager(clazz.FullName, "", null);
+                               if (resourceBundle != null)
+                               {
+                                       System.Object obj = resourceBundle.GetObject(key);
+                                       if (obj == null)
+                                       {
+                                               System.Console.Error.WriteLine("WARN: Message with key:" + key + " and locale: " + System.Threading.Thread.CurrentThread.CurrentCulture + " not found.");
+                                       }
+                               }
+                       }
+                       catch (System.Resources.MissingManifestResourceException e)
+                       {
+                               System.Console.Error.WriteLine("WARN: Message with key:" + key + " and locale: " + System.Threading.Thread.CurrentThread.CurrentCulture + " not found.");
+                       }
+                       catch (System.Exception e)
+                       {
+                               // ignore all other errors and exceptions
+                               // since this code is just a test to see if the message is present on the
+                               // system
+                       }
+               }
+               
+               /*
+               * Make a class field accessible
+               */
+               //@SuppressWarnings("unchecked")
+               private static void  MakeAccessible(System.Reflection.FieldInfo field)
+               {
+                       if (System.Security.SecurityManager.SecurityEnabled)
+                       {
+                               //field.setAccessible(true);   // {{Aroush-2.9}} java.lang.reflect.AccessibleObject.setAccessible
+                       }
+                       else
+                       {
+                //AccessController.doPrivileged(new AnonymousClassPrivilegedAction(field));     // {{Aroush-2.9}} java.security.AccessController.doPrivileged
+                       }
+               }
+       }
+}
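The subclassing pattern this class expects (shown in Java in Messages/Package.html below) translates to C# roughly as follows. This is a sketch, not part of the commit: the bundle class, field name, and the .resources file they would map to are illustrative assumptions.

    using Mono.Lucene.Net.Messages;

    // A message bundle: public static string fields double as message keys.
    public class MessagesTestBundle : NLS
    {
        private static readonly string BUNDLE_NAME = typeof(MessagesTestBundle).FullName;

        static MessagesTestBundle()
        {
            // Registers the bundle; Load() assigns each public static field its
            // own name as its value and validates the keys against the bundle.
            InitializeMessages(BUNDLE_NAME, typeof(MessagesTestBundle));
        }

        private MessagesTestBundle() { } // never instantiated

        // Field names must match the keys in the resource file.
        public static string Q0001E_INVALID_SYNTAX;
    }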
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/NLSException.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/NLSException.cs
new file mode 100644 (file)
index 0000000..29286c8
--- /dev/null
@@ -0,0 +1,37 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Messages
+{
+       
+	/// <summary> Interface that exceptions should implement to support lazy loading of messages.
+	/// 
+	/// Part of the Native Language Support (NLS) system for software internationalization.
+	/// 
+	/// This interface should be implemented by all exceptions that require
+	/// translation.
+	/// </summary>
+       public interface NLSException
+       {
+		/// <returns> an instance of a class that implements the Message interface
+               /// </returns>
+               Message GetMessageObject();
+       }
+}
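A hypothetical translatable exception tying Message and NLSException together might look like this (the type name and constructor are illustrative, not from the commit):

    using System;
    using Mono.Lucene.Net.Messages;

    // An exception whose message text is localized lazily, on demand.
    public class QueryNodeException : Exception, NLSException
    {
        private readonly Message message;

        public QueryNodeException(Message message) : base(message.GetKey())
        {
            this.message = message;
        }

        // NLSException: hand back the Message so callers can localize it later.
        public Message GetMessageObject()
        {
            return message;
        }

        public override string ToString()
        {
            return message.GetLocalizedMessage();
        }
    }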
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/Package.html b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Messages/Package.html
new file mode 100644 (file)
index 0000000..d971ae7
--- /dev/null
@@ -0,0 +1,99 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">\r
+<!--\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements.  See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License.  You may obtain a copy of the License at\r
+\r
+     http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+-->\r
+<html>\r
+<head>\r
+   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">\r
+</head>\r
+<body>\r
+\r
+Part of the Native Language Support (NLS) system for software internationalization.\r
+\r
+<h2>NLS message API</h2>\r
+<p>\r
+This utility API adds support for NLS messages in the Apache code.\r
+It is currently used by the Lucene "New Flexible Query Parser".\r
+</p>\r
+<p>\r
+Features:\r
+    <ol>\r
+        <li>Message reference in the code, using static Strings</li>\r
+        <li>Message resource validation at class load time, for easier debugging</li>\r
+        <li>Allows for message IDs to be re-factored using eclipse or other code re-factor tools</li>\r
+        <li>Allows for reference counting of messages, just like code</li>\r
+               <li>Lazy loading of Message Strings</li>        \r
+        <li>Normal loading Message Strings</li>                  \r
+    </ol>\r
+</p>\r
+\r
+<br/>\r
+<br/>\r
+<p>\r
+Lazy loading of Message Strings\r
+\r
+<pre>\r
+       public class MessagesTestBundle extends NLS {\r
+       \r
+         private static final String BUNDLE_NAME = MessagesTestBundle.class.getName();\r
+       \r
+         private MessagesTestBundle() {\r
+           // should never be instantiated\r
+         }\r
+       \r
+         static {\r
+           // register all string ids with NLS class and initialize static string\r
+           // values\r
+           NLS.initializeMessages(BUNDLE_NAME, MessagesTestBundle.class);\r
+         }\r
+       \r
+         // static string must match the strings in the property files.\r
+         public static String Q0001E_INVALID_SYNTAX;\r
+         public static String Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION;\r
+       \r
+         // this message is missing from the properties file\r
+         public static String Q0005E_MESSAGE_NOT_IN_BUNDLE;\r
+       }\r
+\r
+    // Create a message reference\r
+    Message invalidSyntax = new MessageImpl(MessagesTestBundle.Q0001E_INVALID_SYNTAX, "XXX");\r
+    \r
+    // Do other stuff in the code...\r
+    // when is time to display the message to the user or log the message on a file\r
+    // the message is loaded from the correct bundle\r
+    \r
+    String message1 = invalidSyntax.getLocalizedMessage();\r
+    String message2 = invalidSyntax.getLocalizedMessage(Locale.JAPANESE);\r
+</pre>\r
+</p>\r
+\r
+<br/>\r
+<br/>\r
+<p>\r
+Normal loading of Message Strings\r
+\r
+<pre>\r
+       String message1 = NLS.getLocalizedMessage(MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION);\r
+       String message2 = NLS.getLocalizedMessage(MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION, Locale.JAPANESE);\r
+</pre>\r
+</p>\r
+\r
+<p>\r
+The org.apache.lucene.messages.TestNLS JUnit test contains several other examples.\r
+The TestNLS Java code is available from the Apache Lucene code repository.\r
+</p>\r
+</body>\r
+</html>\r
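For the C# port added by this commit, the two "normal loading" Java lines above map onto the public static overloads of Mono.Lucene.Net.Messages.NLS, roughly as follows (the message key is illustrative and assumes a bundle registered via InitializeMessages):

    using System;
    using System.Globalization;
    using Mono.Lucene.Net.Messages;

    class NormalLoadingDemo
    {
        static void Main()
        {
            // Resolve and format immediately, using the current culture...
            Console.WriteLine(NLS.GetLocalizedMessage(
                "Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION"));

            // ...or using an explicitly supplied culture.
            Console.WriteLine(NLS.GetLocalizedMessage(
                "Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION",
                new CultureInfo("ja-JP")));
        }
    }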
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Overview.html b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Overview.html
new file mode 100644 (file)
index 0000000..4e38b8a
--- /dev/null
@@ -0,0 +1,222 @@
+<!--\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements.  See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License.  You may obtain a copy of the License at\r
+\r
+     http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+-->\r
+<html>\r
+<head>\r
+   <title>Apache Lucene API</title>\r
+</head>\r
+<body>\r
+\r
+<p>Apache Lucene is a high-performance, full-featured text search engine library.\r
+Here's a simple example of how to use Lucene for indexing and searching (using JUnit\r
+to check if the results are what we expect):</p>\r
+\r
+<!-- code comes from Lucene.Net.TestDemo: -->\r
+<pre>\r
+    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);\r
+\r
+    // Store the index in memory:\r
+    Directory directory = new RAMDirectory();\r
+    // To store an index on disk, use this instead:\r
+    //Directory directory = FSDirectory.open("/tmp/testindex");\r
+    IndexWriter iwriter = new IndexWriter(directory, analyzer, true,\r
+                                          new IndexWriter.MaxFieldLength(25000));\r
+    Document doc = new Document();\r
+    String text = "This is the text to be indexed.";\r
+    doc.add(new Field("fieldname", text, Field.Store.YES,\r
+        Field.Index.ANALYZED));\r
+    iwriter.addDocument(doc);\r
+    iwriter.close();\r
+\r
+    // Now search the index:\r
+    IndexSearcher isearcher = new IndexSearcher(directory, true); // read-only=true\r
+    // Parse a simple query that searches for "text":\r
+    QueryParser parser = new QueryParser("fieldname", analyzer);\r
+    Query query = parser.parse("text");\r
+    ScoreDoc[] hits = isearcher.search(query, null, 1000).scoreDocs;\r
+    assertEquals(1, hits.length);\r
+    // Iterate through the results:\r
+    for (int i = 0; i &lt; hits.length; i++) {\r
+      Document hitDoc = isearcher.doc(hits[i].doc);\r
+      assertEquals("This is the text to be indexed.", hitDoc.get("fieldname"));\r
+    }\r
+    isearcher.close();\r
+    directory.close();\r
+</pre>\r
+\r
+\r
+\r
+<p>The Lucene API is divided into several packages:</p>\r
+\r
+<ul>\r
+<li>\r
+<b><a href = "org/apache/lucene/analysis/package-summary.html">Lucene.Net.Analysis</a></b>\r
+defines an abstract <a href = "org/apache/lucene/analysis/Analyzer.html">Analyzer</a>\r
+API for converting text from a <a href = "http://java.sun.com//products/jdk/1.2/docs/api/java/io/Reader.html">java.io.Reader</a>\r
+into a <a href = "org/apache/lucene/analysis/TokenStream.html">TokenStream</a>,\r
+an enumeration of token <a href = "org/apache/lucene/util/Attribute.html">Attribute</a>s.&nbsp;\r
+A TokenStream can be composed by applying <a href = "org/apache/lucene/analysis/TokenFilter.html">TokenFilter</a>s\r
+to the output of a <a href = "org/apache/lucene/analysis/Tokenizer.html">Tokenizer</a>.&nbsp;\r
+Tokenizers and TokenFilters are strung together and applied with an <a href = "org/apache/lucene/analysis/Analyzer.html">Analyzer</a>.&nbsp;\r
+A handful of Analyzer implementations are provided, including <a href = "org/apache/lucene/analysis/StopAnalyzer.html">StopAnalyzer</a>\r
+and the grammar-based <a href = "org/apache/lucene/analysis/standard/StandardAnalyzer.html">StandardAnalyzer</a>.</li>\r
+\r
+<li>\r
+<b><a href = "org/apache/lucene/document/package-summary.html">Lucene.Net.Documents</a></b>\r
+provides a simple <a href = "org/apache/lucene/document/Document.html">Document</a>\r
+class.&nbsp; A Document is simply a set of named <a href = "org/apache/lucene/document/Field.html">Field</a>s,\r
+whose values may be strings or instances of <a href = "http://java.sun.com//products/jdk/1.2/docs/api/java/io/Reader.html">java.io.Reader</a>.</li>\r
+\r
+<li>\r
+<b><a href = "org/apache/lucene/index/package-summary.html">Lucene.Net.Index</a></b>\r
+provides two primary classes: <a href = "org/apache/lucene/index/IndexWriter.html">IndexWriter</a>,\r
+which creates and adds documents to indices; and <a href = "org/apache/lucene/index/IndexReader.html">IndexReader</a>,\r
+which accesses the data in the index.</li>\r
+\r
+<li>\r
+<b><a href = "org/apache/lucene/search/package-summary.html">Lucene.Net.Search</a></b>\r
+provides data structures to represent queries (e.g. <a href = "org/apache/lucene/search/TermQuery.html">TermQuery</a>\r
+for individual words, <a href = "org/apache/lucene/search/PhraseQuery.html">PhraseQuery</a>\r
+for phrases, and <a href = "org/apache/lucene/search/BooleanQuery.html">BooleanQuery</a>\r
+for boolean combinations of queries) and the abstract <a href = "org/apache/lucene/search/Searcher.html">Searcher</a>\r
+which turns queries into <a href = "org/apache/lucene/search/TopDocs.html">TopDocs</a>.\r
+<a href = "org/apache/lucene/search/IndexSearcher.html">IndexSearcher</a>\r
+implements search over a single IndexReader.</li>\r
+\r
+<li>\r
+<b><a href = "org/apache/lucene/queryParser/package-summary.html">Lucene.Net.QueryParsers</a></b>\r
+uses <a href = "http://javacc.dev.java.net">JavaCC</a> to implement a\r
+<a href = "org/apache/lucene/queryParser/QueryParser.html">QueryParser</a>.</li>\r
+\r
+<li>\r
+<b><a href = "org/apache/lucene/store/package-summary.html">Lucene.Net.Store</a></b>\r
+defines an abstract class for storing persistent data, the <a href = "org/apache/lucene/store/Directory.html">Directory</a>,\r
+which is a collection of named files written by an <a href = "org/apache/lucene/store/IndexOutput.html">IndexOutput</a>\r
+and read by an <a href = "org/apache/lucene/store/IndexInput.html">IndexInput</a>.&nbsp;\r
+Multiple implementations are provided, including <a href = "org/apache/lucene/store/FSDirectory.html">FSDirectory</a>,\r
+which uses a file system directory to store files, and <a href = "org/apache/lucene/store/RAMDirectory.html">RAMDirectory</a>\r
+which implements files as memory-resident data structures.</li>\r
+\r
+<li>\r
+<b><a href = "org/apache/lucene/util/package-summary.html">Lucene.Net.Util</a></b>\r
+contains a few handy data structures and utility classes, e.g. <a href = "org/apache/lucene/util/BitVector.html">BitVector</a>\r
+and <a href = "org/apache/lucene/util/PriorityQueue.html">PriorityQueue</a>.</li>\r
+</ul>\r
+To use Lucene, an application should (a short C# sketch follows the list):\r
+<ol>\r
+<li>\r
+Create <a href = "org/apache/lucene/document/Document.html">Document</a>s by\r
+adding\r
+<a href = "org/apache/lucene/document/Field.html">Field</a>s;</li>\r
+\r
+<li>\r
+Create an <a href = "org/apache/lucene/index/IndexWriter.html">IndexWriter</a>\r
+and add documents to it with <a href = "org/apache/lucene/index/IndexWriter.html#addDocument(Lucene.Net.Documents.Document)">addDocument()</a>;</li>\r
+\r
+<li>\r
+Call <a href = "org/apache/lucene/queryParser/QueryParser.html#parse(java.lang.String)">QueryParser.parse()</a>\r
+to build a query from a string; and</li>\r
+\r
+<li>\r
+Create an <a href = "org/apache/lucene/search/IndexSearcher.html">IndexSearcher</a>\r
+and pass the query to its <a href = "org/apache/lucene/search/Searcher.html#search(Lucene.Net.Search.Query)">search()</a>\r
+method.</li>\r
+</ol>\r
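+<p>A minimal C# sketch of those four steps against this port's API (a sketch only:\r
+it assumes the <tt>Mono.Lucene.Net</tt> namespaces vendored in this tree and the\r
+port's PascalCase member names, which may differ in other versions):</p>\r
+<pre>\r
+    using Mono.Lucene.Net.Analysis;\r
+    using Mono.Lucene.Net.Analysis.Standard;\r
+    using Mono.Lucene.Net.Documents;\r
+    using Mono.Lucene.Net.Index;\r
+    using Mono.Lucene.Net.QueryParsers;\r
+    using Mono.Lucene.Net.Search;\r
+    using Mono.Lucene.Net.Store;\r
+    using Version = Mono.Lucene.Net.Util.Version;\r
+\r
+    // 1. Create a Document by adding Fields.\r
+    Document doc = new Document();\r
+    doc.Add(new Field("fieldname", "This is the text to be indexed.",\r
+                      Field.Store.YES, Field.Index.ANALYZED));\r
+\r
+    // 2. Create an IndexWriter and add the document with AddDocument().\r
+    Directory directory = new RAMDirectory();\r
+    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_24);\r
+    IndexWriter writer = new IndexWriter(directory, analyzer, true,\r
+                                         new IndexWriter.MaxFieldLength(25000));\r
+    writer.AddDocument(doc);\r
+    writer.Close();\r
+\r
+    // 3. Build a Query from the string "text" with QueryParser.Parse().\r
+    QueryParser parser = new QueryParser(Version.LUCENE_24, "fieldname", analyzer);\r
+    Query query = parser.Parse("text");\r
+\r
+    // 4. Create an IndexSearcher and pass the query to its Search() method.\r
+    IndexSearcher searcher = new IndexSearcher(directory, true);\r
+    ScoreDoc[] hits = searcher.Search(query, null, 1000).ScoreDocs;\r
+    searcher.Close();\r
+    directory.Close();\r
+</pre>\r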
+Some simple examples of code which does this are:\r
+<ul>\r
+<li>\r
+&nbsp;<a href = "http://svn.apache.org//repos/asf/lucene/java/trunk/src/demo/org/apache/lucene/demo/FileDocument.java">FileDocument.java</a> contains\r
+code to create a Document for a file.</li>\r
+\r
+<li>\r
+&nbsp;<a href = "http://svn.apache.org//repos/asf/lucene/java/trunk/src/demo/org/apache/lucene/demo/IndexFiles.java">IndexFiles.java</a> creates an\r
+index for all the files contained in a directory.</li>\r
+\r
+<li>\r
+&nbsp;<a href = "http://svn.apache.org//repos/asf/lucene/java/trunk/src/demo/org/apache/lucene/demo/DeleteFiles.java">DeleteFiles.java</a> deletes some\r
+of these files from the index.</li>\r
+\r
+<li>\r
+&nbsp;<a href = "http://svn.apache.org//repos/asf/lucene/java/trunk/src/demo/org/apache/lucene/demo/SearchFiles.java">SearchFiles.java</a> prompts for\r
+queries and searches an index.</li>\r
+</ul>\r
+To demonstrate these, try something like:\r
+<blockquote><tt>> <b>java -cp lucene.jar:lucene-demo.jar Lucene.Net.demo.IndexFiles rec.food.recipes/soups</b></tt>\r
+<br><tt>adding rec.food.recipes/soups/abalone-chowder</tt>\r
+<br><tt>&nbsp; </tt>[ ... ]\r
+\r
+<p><tt>> <b>java -cp lucene.jar:lucene-demo.jar Lucene.Net.demo.SearchFiles</b></tt>\r
+<br><tt>Query: <b>chowder</b></tt>\r
+<br><tt>Searching for: chowder</tt>\r
+<br><tt>34 total matching documents</tt>\r
+<br><tt>1. rec.food.recipes/soups/spam-chowder</tt>\r
+<br><tt>&nbsp; </tt>[ ... thirty-four documents contain the word "chowder" ... ]\r
+\r
+<p><tt>Query: <b>"clam chowder" AND Manhattan</b></tt>\r
+<br><tt>Searching for: +"clam chowder" +manhattan</tt>\r
+<br><tt>2 total matching documents</tt>\r
+<br><tt>1. rec.food.recipes/soups/clam-chowder</tt>\r
+<br><tt>&nbsp; </tt>[ ... two documents contain the phrase "clam chowder"\r
+and the word "manhattan" ... ]\r
+<br>&nbsp;&nbsp;&nbsp; [ Note: "+" and "-" are canonical, but "AND", "OR"\r
+and "NOT" may be used. ]</blockquote>\r
+\r
+The <a href = "http://svn.apache.org//repos/asf/lucene/java/trunk/src/demo/org/apache/lucene/demo/IndexHTML.java">IndexHTML</a> demo is more sophisticated.&nbsp;\r
+It incrementally maintains an index of HTML files, adding new files as\r
+they appear, deleting old files as they disappear and re-indexing files\r
+as they change.\r
+<blockquote><tt>> <b>java -cp lucene.jar:lucene-demo.jar Lucene.Net.demo.IndexHTML -create java/jdk1.1.6/docs/relnotes</b></tt>\r
+<br><tt>adding java/jdk1.1.6/docs/relnotes/SMICopyright.html</tt>\r
+<br><tt>&nbsp; </tt>[ ... create an index containing all the relnotes ]\r
+<p><tt>> <b>rm java/jdk1.1.6/docs/relnotes/smicopyright.html</b></tt>\r
+<p><tt>> <b>java -cp lucene.jar:lucene-demo.jar Lucene.Net.demo.IndexHTML java/jdk1.1.6/docs/relnotes</b></tt>\r
+<br><tt>deleting java/jdk1.1.6/docs/relnotes/SMICopyright.html</tt></blockquote>\r
+\r
+</body>\r
+</html>\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Package.html b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Package.html
new file mode 100644 (file)
index 0000000..8026ee1
--- /dev/null
@@ -0,0 +1,17 @@
+<!--\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements.  See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License.  You may obtain a copy of the License at\r
+\r
+     http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+-->\r
+<html><body>Top-level package.</body></html>\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/.gitattributes b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/.gitattributes
new file mode 100644 (file)
index 0000000..7e206e9
--- /dev/null
@@ -0,0 +1,11 @@
+/CharStream.cs -crlf
+/FastCharStream.cs -crlf
+/MultiFieldQueryParser.cs -crlf
+/Package.html -crlf
+/ParseException.cs -crlf
+/QueryParser.JJ -crlf
+/QueryParser.cs -crlf
+/QueryParserConstants.cs -crlf
+/QueryParserTokenManager.cs -crlf
+/Token.cs -crlf
+/TokenMgrError.cs -crlf
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/CharStream.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/CharStream.cs
new file mode 100644 (file)
index 0000000..34235bf
--- /dev/null
@@ -0,0 +1,125 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Generated By:JavaCC: Do not edit this line. CharStream.java Version 4.1 */
+/* JavaCCOptions:STATIC=false */
+
+using System;
+
+namespace Mono.Lucene.Net.QueryParsers
+{
+       
+	/// <summary> This interface describes a character stream that maintains line and
+	/// column number positions of the characters.  It also has the capability
+	/// to back up the stream to some extent.  An implementation of this
+	/// interface is used in the TokenManager implementation generated by
+	/// JavaCCParser.
+	/// 
+	/// All the methods except Backup can be implemented in any fashion; Backup
+	/// needs to be implemented correctly for the correct operation of the lexer.
+	/// The rest of the methods are only used to get information like the line
+	/// number, the column number and the String that constitutes a token, and
+	/// are not used by the lexer. Hence their implementation won't affect the
+	/// generated lexer's operation.
+       /// </summary>
+       
+       public interface CharStream
+       {
+               
+               /// <summary> Returns the next character from the selected input.  The method
+               /// of selecting the input is the responsibility of the class
+		/// implementing this interface.  Can throw any System.IO.IOException.
+               /// </summary>
+               char ReadChar();
+               
+               /// <summary> Returns the column position of the character last read.</summary>
+               /// <deprecated>
+               /// </deprecated>
+		/// <seealso cref="GetEndColumn">
+               /// </seealso>
+        [Obsolete]
+               int GetColumn();
+               
+               /// <summary> Returns the line number of the character last read.</summary>
+               /// <deprecated>
+               /// </deprecated>
+		/// <seealso cref="GetEndLine">
+               /// </seealso>
+        [Obsolete]
+               int GetLine();
+               
+		/// <summary> Returns the column number of the last character for the current token (being
+		/// matched after the last call to BeginToken).
+               /// </summary>
+               int GetEndColumn();
+               
+		/// <summary> Returns the line number of the last character for the current token (being
+		/// matched after the last call to BeginToken).
+               /// </summary>
+               int GetEndLine();
+               
+		/// <summary> Returns the column number of the first character for the current token (being
+		/// matched after the last call to BeginToken).
+               /// </summary>
+               int GetBeginColumn();
+               
+		/// <summary> Returns the line number of the first character for the current token (being
+		/// matched after the last call to BeginToken).
+               /// </summary>
+               int GetBeginLine();
+               
+		/// <summary> Backs up the input stream by <c>amount</c> steps. The lexer calls this method
+		/// when it has already read some characters, but could not use them to match a
+		/// (longer) token. So they will be used again as the prefix of the next
+		/// token, and it is the implementation's responsibility to do this right.
+               /// </summary>
+               void  Backup(int amount);
+               
+               /// <summary> Returns the next character that marks the beginning of the next token.
+               /// All characters must remain in the buffer between two successive calls
+               /// to this method to implement backup correctly.
+               /// </summary>
+               char BeginToken();
+               
+               /// <summary> Returns a string made up of characters from the marked token beginning
+               /// to the current buffer position. Implementations have the choice of returning
+               /// anything that they want to. For example, for efficiency, one might decide
+               /// to just return null, which is a valid implementation.
+               /// </summary>
+               System.String GetImage();
+               
+               /// <summary> Returns an array of characters that make up the suffix of length 'len' for
+               /// the currently matched token. This is used to build up the matched string
+               /// for use in actions in the case of MORE. A simple and inefficient
+		/// implementation of this is as follows:
+		/// 
+		/// {
+		/// string t = GetImage();
+		/// return t.Substring(t.Length - len).ToCharArray();
+		/// }
+               /// </summary>
+               char[] GetSuffix(int len);
+               
+               /// <summary> The lexer calls this function to indicate that it is done with the stream
+               /// and hence implementations can free any resources held by this class.
+               /// Again, the body of this function can be just empty and it will not
+               /// affect the lexer's operation.
+               /// </summary>
+               void  Done();
+       }
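+	/* A sketch of how the generated lexer typically drives this interface
+	   (illustrative only, not part of the generated API):
+	
+	       char c = stream.BeginToken();     // mark the start of a token
+	       // ... call stream.ReadChar() until the token no longer matches ...
+	       stream.Backup(1);                 // push back the unused lookahead char
+	       string image = stream.GetImage(); // text of the matched token
+	*/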
+       /* JavaCC - OriginalChecksum=a83909a2403f969f94d18375f9f143e4 (do not edit this line) */
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/FastCharStream.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/FastCharStream.cs
new file mode 100644 (file)
index 0000000..505e640
--- /dev/null
@@ -0,0 +1,154 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// FastCharStream.java
+
+using System;
+
+namespace Mono.Lucene.Net.QueryParsers
+{
+       
+       /// <summary>An efficient implementation of JavaCC's CharStream interface.  <p/>Note that
+       /// this does not do line-number counting, but instead keeps track of the
+       /// character position of the token in the input, as required by Lucene's {@link
+       /// Mono.Lucene.Net.Analysis.Token} API. 
+       /// 
+       /// </summary>
+       public sealed class FastCharStream : CharStream
+       {
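+		// Positions here are absolute character offsets rather than line/column
+		// pairs: GetLine() always reports 1, and the "column" accessors return
+		// offsets from the start of the input (bufferStart is the input position
+		// of buffer[0]).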
+               internal char[] buffer = null;
+               
+               internal int bufferLength = 0; // end of valid chars
+               internal int bufferPosition = 0; // next char to read
+               
+               internal int tokenStart = 0; // offset in buffer
+               internal int bufferStart = 0; // position in file of buffer
+               
+               internal System.IO.TextReader input; // source of chars
+               
+               /// <summary>Constructs from a Reader. </summary>
+               public FastCharStream(System.IO.TextReader r)
+               {
+                       input = r;
+               }
+               
+               public char ReadChar()
+               {
+                       if (bufferPosition >= bufferLength)
+                               Refill();
+                       return buffer[bufferPosition++];
+               }
+               
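+		// Refill preserves the token currently being matched: the chars from
+		// tokenStart onward must stay available for GetImage()/GetSuffix().
+		// If the pending token already starts at the front of the buffer, the
+		// buffer is allocated on first use or doubled when full; otherwise the
+		// pending token is shifted to the front. The freed tail of the buffer
+		// is then filled from the underlying reader.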
+               private void  Refill()
+               {
+                       int newPosition = bufferLength - tokenStart;
+                       
+                       if (tokenStart == 0)
+                       {
+                               // token won't fit in buffer
+                               if (buffer == null)
+                               {
+                                       // first time: alloc buffer
+                                       buffer = new char[2048];
+                               }
+                               else if (bufferLength == buffer.Length)
+                               {
+                                       // grow buffer
+                                       char[] newBuffer = new char[buffer.Length * 2];
+                                       Array.Copy(buffer, 0, newBuffer, 0, bufferLength);
+                                       buffer = newBuffer;
+                               }
+                       }
+                       else
+                       {
+                               // shift token to front
+                               Array.Copy(buffer, tokenStart, buffer, 0, newPosition);
+                       }
+                       
+                       bufferLength = newPosition; // update state
+                       bufferPosition = newPosition;
+                       bufferStart += tokenStart;
+                       tokenStart = 0;
+                       
+                       int charsRead = input.Read(buffer, newPosition, buffer.Length - newPosition);
+                       if (charsRead <= 0)
+                               throw new System.IO.IOException("read past eof");
+                       else
+                               bufferLength += charsRead;
+               }
+               
+               public char BeginToken()
+               {
+                       tokenStart = bufferPosition;
+                       return ReadChar();
+               }
+               
+               public void  Backup(int amount)
+               {
+                       bufferPosition -= amount;
+               }
+               
+               public System.String GetImage()
+               {
+                       return new System.String(buffer, tokenStart, bufferPosition - tokenStart);
+               }
+               
+		public char[] GetSuffix(int len)
+		{
+			char[] suffix = new char[len];
+			Array.Copy(buffer, bufferPosition - len, suffix, 0, len);
+			return suffix;
+		}
+               
+               public void  Done()
+               {
+                       try
+                       {
+                               input.Close();
+                       }
+                       catch (System.IO.IOException e)
+                       {
+                               System.Console.Error.WriteLine("Caught: " + e + "; ignoring.");
+                       }
+               }
+               
+               public int GetColumn()
+               {
+                       return bufferStart + bufferPosition;
+               }
+               public int GetLine()
+               {
+                       return 1;
+               }
+               public int GetEndColumn()
+               {
+                       return bufferStart + bufferPosition;
+               }
+               public int GetEndLine()
+               {
+                       return 1;
+               }
+               public int GetBeginColumn()
+               {
+                       return bufferStart + tokenStart;
+               }
+               public int GetBeginLine()
+               {
+                       return 1;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/MultiFieldQueryParser.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/MultiFieldQueryParser.cs
new file mode 100644 (file)
index 0000000..ff340f3
--- /dev/null
@@ -0,0 +1,584 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Analyzer = Mono.Lucene.Net.Analysis.Analyzer;
+using BooleanClause = Mono.Lucene.Net.Search.BooleanClause;
+using BooleanQuery = Mono.Lucene.Net.Search.BooleanQuery;
+using MultiPhraseQuery = Mono.Lucene.Net.Search.MultiPhraseQuery;
+using PhraseQuery = Mono.Lucene.Net.Search.PhraseQuery;
+using Query = Mono.Lucene.Net.Search.Query;
+using Version = Mono.Lucene.Net.Util.Version;
+
+namespace Mono.Lucene.Net.QueryParsers
+{
+       
+       /// <summary> A QueryParser which constructs queries to search multiple fields.
+       /// 
+       /// </summary>
+       /// <version>  $Revision: 829134 $
+       /// </version>
+       public class MultiFieldQueryParser:QueryParser
+       {
+               protected internal System.String[] fields;
+               protected internal System.Collections.IDictionary boosts;
+               
+		/// <summary> Creates a MultiFieldQueryParser. Allows passing a map from field
+		/// name to a boost, which is applied to each term in that field.
+               /// 
+               /// <p/>
+               /// It will, when parse(String query) is called, construct a query like this
+               /// (assuming the query consists of two terms and you specify the two fields
+               /// <code>title</code> and <code>body</code>):
+               /// <p/>
+               /// 
+               /// <code>
+               /// (title:term1 body:term1) (title:term2 body:term2)
+               /// </code>
+               /// 
+               /// <p/>
+               /// When setDefaultOperator(AND_OPERATOR) is set, the result will be:
+               /// <p/>
+               /// 
+               /// <code>
+               /// +(title:term1 body:term1) +(title:term2 body:term2)
+               /// </code>
+               /// 
+               /// <p/>
+		/// When you pass boosts (title=>5, body=>10) you can get
+               /// <p/>
+               /// 
+               /// <code>
+               /// +(title:term1^5.0 body:term1^10.0) +(title:term2^5.0 body:term2^10.0)
+               /// </code>
+               /// 
+               /// <p/>
+               /// In other words, all the query's terms must appear, but it doesn't matter
+               /// in what fields they appear.
+               /// <p/>
+               /// 
+               /// </summary>
+               /// <deprecated> Please use
+               /// {@link #MultiFieldQueryParser(Version, String[], Analyzer, Map)}
+               /// instead
+               /// </deprecated>
+        [Obsolete("Please use MultiFieldQueryParser(Version, String[], Analyzer, IDictionary) instead")]
+               public MultiFieldQueryParser(System.String[] fields, Analyzer analyzer, System.Collections.IDictionary boosts):this(Version.LUCENE_24, fields, analyzer)
+               {
+                       this.boosts = boosts;
+               }
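+		// Usage sketch for the boosts map (hypothetical field names; note that
+		// GetFieldQuery below unboxes each stored value as a System.Single):
+		//
+		//   System.Collections.IDictionary boosts = new System.Collections.Hashtable();
+		//   boosts["title"] = 5f;
+		//   boosts["body"] = 10f;
+		//   MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_24,
+		//       new System.String[] { "title", "body" }, analyzer, boosts);
+		//   Query q = parser.Parse("term1 term2");
+		//   // => (title:term1^5.0 body:term1^10.0) (title:term2^5.0 body:term2^10.0)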
+               
+		/// <summary> Creates a MultiFieldQueryParser. Allows passing a map from field
+		/// name to a boost, which is applied to each term in that field.
+               /// 
+               /// <p/>
+               /// It will, when parse(String query) is called, construct a query like this
+               /// (assuming the query consists of two terms and you specify the two fields
+               /// <code>title</code> and <code>body</code>):
+               /// <p/>
+               /// 
+               /// <code>
+               /// (title:term1 body:term1) (title:term2 body:term2)
+               /// </code>
+               /// 
+               /// <p/>
+               /// When setDefaultOperator(AND_OPERATOR) is set, the result will be:
+               /// <p/>
+               /// 
+               /// <code>
+               /// +(title:term1 body:term1) +(title:term2 body:term2)
+               /// </code>
+               /// 
+               /// <p/>
+		/// When you pass boosts (title=>5, body=>10) you can get
+               /// <p/>
+               /// 
+               /// <code>
+               /// +(title:term1^5.0 body:term1^10.0) +(title:term2^5.0 body:term2^10.0)
+               /// </code>
+               /// 
+               /// <p/>
+               /// In other words, all the query's terms must appear, but it doesn't matter
+               /// in what fields they appear.
+               /// <p/>
+               /// </summary>
+               public MultiFieldQueryParser(Version matchVersion, System.String[] fields, Analyzer analyzer, System.Collections.IDictionary boosts):this(matchVersion, fields, analyzer)
+               {
+                       this.boosts = boosts;
+               }
+               
+               /// <summary> Creates a MultiFieldQueryParser.
+               /// 
+               /// <p/>
+               /// It will, when parse(String query) is called, construct a query like this
+               /// (assuming the query consists of two terms and you specify the two fields
+               /// <code>title</code> and <code>body</code>):
+               /// <p/>
+               /// 
+               /// <code>
+               /// (title:term1 body:term1) (title:term2 body:term2)
+               /// </code>
+               /// 
+               /// <p/>
+               /// When setDefaultOperator(AND_OPERATOR) is set, the result will be:
+               /// <p/>
+               /// 
+               /// <code>
+               /// +(title:term1 body:term1) +(title:term2 body:term2)
+               /// </code>
+               /// 
+               /// <p/>
+               /// In other words, all the query's terms must appear, but it doesn't matter
+               /// in what fields they appear.
+               /// <p/>
+               /// 
+               /// </summary>
+               /// <deprecated> Please use
+               /// {@link #MultiFieldQueryParser(Version, String[], Analyzer)}
+               /// instead
+               /// </deprecated>
+        [Obsolete("Please use MultiFieldQueryParser(Version, String[], Analyzer) instead")]
+               public MultiFieldQueryParser(System.String[] fields, Analyzer analyzer):this(Version.LUCENE_24, fields, analyzer)
+               {
+               }
+               
+               /// <summary> Creates a MultiFieldQueryParser.
+               /// 
+               /// <p/>
+               /// It will, when parse(String query) is called, construct a query like this
+               /// (assuming the query consists of two terms and you specify the two fields
+               /// <code>title</code> and <code>body</code>):
+               /// <p/>
+               /// 
+               /// <code>
+               /// (title:term1 body:term1) (title:term2 body:term2)
+               /// </code>
+               /// 
+               /// <p/>
+               /// When setDefaultOperator(AND_OPERATOR) is set, the result will be:
+               /// <p/>
+               /// 
+               /// <code>
+               /// +(title:term1 body:term1) +(title:term2 body:term2)
+               /// </code>
+               /// 
+               /// <p/>
+               /// In other words, all the query's terms must appear, but it doesn't matter
+               /// in what fields they appear.
+               /// <p/>
+               /// </summary>
+               public MultiFieldQueryParser(Version matchVersion, System.String[] fields, Analyzer analyzer):base(matchVersion, null, analyzer)
+               {
+                       this.fields = fields;
+               }
+               
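+		// When the parser was constructed with a null default field (see
+		// base(matchVersion, null, analyzer) above), an unqualified term is
+		// expanded across every entry in 'fields' and the per-field queries are
+		// OR'ed together (SHOULD); an explicit "field:term" in the query string
+		// bypasses the expansion.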
+               protected internal override Query GetFieldQuery(System.String field, System.String queryText, int slop)
+               {
+                       if (field == null)
+                       {
+                               System.Collections.IList clauses = new System.Collections.ArrayList();
+                               for (int i = 0; i < fields.Length; i++)
+                               {
+                                       Query q = base.GetFieldQuery(fields[i], queryText);
+                                       if (q != null)
+                                       {
+                                               //If the user passes a map of boosts
+                                               if (boosts != null)
+                                               {
+							// Get the boost from the map and apply it
+                            if (boosts.Contains(fields[i]))
+                                                       {
+                                                               System.Single boost = (System.Single) boosts[fields[i]];
+                                                               q.SetBoost((float) boost);
+                                                       }
+                                               }
+                                               ApplySlop(q, slop);
+                                               clauses.Add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
+                                       }
+                               }
+				if (clauses.Count == 0) // happens for stopwords
+					return null;
+                               return GetBooleanQuery(clauses, true);
+                       }
+                       Query q2 = base.GetFieldQuery(field, queryText);
+                       ApplySlop(q2, slop);
+                       return q2;
+               }
+               
+               private void  ApplySlop(Query q, int slop)
+               {
+                       if (q is PhraseQuery)
+                       {
+                               ((PhraseQuery) q).SetSlop(slop);
+                       }
+                       else if (q is MultiPhraseQuery)
+                       {
+                               ((MultiPhraseQuery) q).SetSlop(slop);
+                       }
+               }
+               
+               
+               public /*protected internal*/ override Query GetFieldQuery(System.String field, System.String queryText)
+               {
+                       return GetFieldQuery(field, queryText, 0);
+               }
+               
+               
+               public /*protected internal*/ override Query GetFuzzyQuery(System.String field, System.String termStr, float minSimilarity)
+               {
+                       if (field == null)
+                       {
+                               System.Collections.IList clauses = new System.Collections.ArrayList();
+                               for (int i = 0; i < fields.Length; i++)
+                               {
+                                       clauses.Add(new BooleanClause(GetFuzzyQuery(fields[i], termStr, minSimilarity), BooleanClause.Occur.SHOULD));
+                               }
+                               return GetBooleanQuery(clauses, true);
+                       }
+                       return base.GetFuzzyQuery(field, termStr, minSimilarity);
+               }
+               
+               public /*protected internal*/ override Query GetPrefixQuery(System.String field, System.String termStr)
+               {
+                       if (field == null)
+                       {
+                               System.Collections.IList clauses = new System.Collections.ArrayList();
+                               for (int i = 0; i < fields.Length; i++)
+                               {
+                                       clauses.Add(new BooleanClause(GetPrefixQuery(fields[i], termStr), BooleanClause.Occur.SHOULD));
+                               }
+                               return GetBooleanQuery(clauses, true);
+                       }
+                       return base.GetPrefixQuery(field, termStr);
+               }
+               
+               public /*protected internal*/ override Query GetWildcardQuery(System.String field, System.String termStr)
+               {
+                       if (field == null)
+                       {
+                               System.Collections.IList clauses = new System.Collections.ArrayList();
+                               for (int i = 0; i < fields.Length; i++)
+                               {
+                                       clauses.Add(new BooleanClause(GetWildcardQuery(fields[i], termStr), BooleanClause.Occur.SHOULD));
+                               }
+                               return GetBooleanQuery(clauses, true);
+                       }
+                       return base.GetWildcardQuery(field, termStr);
+               }
+               
+               
+               protected internal override Query GetRangeQuery(System.String field, System.String part1, System.String part2, bool inclusive)
+               {
+                       if (field == null)
+                       {
+                               System.Collections.IList clauses = new System.Collections.ArrayList();
+                               for (int i = 0; i < fields.Length; i++)
+                               {
+                                       clauses.Add(new BooleanClause(GetRangeQuery(fields[i], part1, part2, inclusive), BooleanClause.Occur.SHOULD));
+                               }
+                               return GetBooleanQuery(clauses, true);
+                       }
+                       return base.GetRangeQuery(field, part1, part2, inclusive);
+               }
+               
+               /// <summary> Parses a query which searches on the fields specified.
+               /// <p/>
+               /// If x fields are specified, this effectively constructs:
+               /// 
+               /// <pre>
+               /// &lt;code&gt;
+               /// (field1:query1) (field2:query2) (field3:query3)...(fieldx:queryx)
+               /// &lt;/code&gt;
+               /// </pre>
+               /// 
+               /// </summary>
+		/// <param name="queries">Query strings to parse
+               /// </param>
+               /// <param name="fields">Fields to search on
+               /// </param>
+               /// <param name="analyzer">Analyzer to use
+               /// </param>
+               /// <throws>  ParseException </throws>
+               /// <summary>             if query parsing fails
+               /// </summary>
+               /// <throws>  IllegalArgumentException </throws>
+               /// <summary>             if the length of the queries array differs from the length of
+               /// the fields array
+               /// </summary>
+               /// <deprecated> Use {@link #Parse(Version,String[],String[],Analyzer)}
+               /// instead
+               /// </deprecated>
+        [Obsolete("Use Parse(Version,String[],String[],Analyzer) instead")]
+               public static Query Parse(System.String[] queries, System.String[] fields, Analyzer analyzer)
+               {
+                       return Parse(Version.LUCENE_24, queries, fields, analyzer);
+               }
+               
+               /// <summary> Parses a query which searches on the fields specified.
+               /// <p/>
+               /// If x fields are specified, this effectively constructs:
+               /// 
+               /// <pre>
+               /// &lt;code&gt;
+               /// (field1:query1) (field2:query2) (field3:query3)...(fieldx:queryx)
+               /// &lt;/code&gt;
+               /// </pre>
+               /// 
+               /// </summary>
+               /// <param name="matchVersion">Lucene version to match; this is passed through to
+               /// QueryParser.
+               /// </param>
+		/// <param name="queries">Query strings to parse
+               /// </param>
+               /// <param name="fields">Fields to search on
+               /// </param>
+               /// <param name="analyzer">Analyzer to use
+               /// </param>
+               /// <throws>  ParseException </throws>
+               /// <summary>             if query parsing fails
+               /// </summary>
+               /// <throws>  IllegalArgumentException </throws>
+               /// <summary>             if the length of the queries array differs from the length of
+               /// the fields array
+               /// </summary>
+               public static Query Parse(Version matchVersion, System.String[] queries, System.String[] fields, Analyzer analyzer)
+               {
+                       if (queries.Length != fields.Length)
+                               throw new System.ArgumentException("queries.length != fields.length");
+                       BooleanQuery bQuery = new BooleanQuery();
+                       for (int i = 0; i < fields.Length; i++)
+                       {
+                               QueryParser qp = new QueryParser(matchVersion, fields[i], analyzer);
+                               Query q = qp.Parse(queries[i]);
+                               if (q != null && (!(q is BooleanQuery) || ((BooleanQuery) q).GetClauses().Length > 0))
+                               {
+                                       bQuery.Add(q, BooleanClause.Occur.SHOULD);
+                               }
+                       }
+                       return bQuery;
+               }
+               
+               /// <summary> Parses a query, searching on the fields specified.
+               /// Use this if you need to specify certain fields as required,
+               /// and others as prohibited.
+               /// <p/><pre>
+               /// Usage:
+               /// <code>
+               /// String[] fields = {"filename", "contents", "description"};
+               /// BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
+               /// BooleanClause.Occur.MUST,
+               /// BooleanClause.Occur.MUST_NOT};
+               /// MultiFieldQueryParser.parse("query", fields, flags, analyzer);
+               /// </code>
+               /// </pre>
+               /// <p/>
+               /// The code above would construct a query:
+               /// <pre>
+               /// <code>
+               /// (filename:query) +(contents:query) -(description:query)
+               /// </code>
+               /// </pre>
+               /// 
+               /// </summary>
+               /// <param name="query">Query string to parse
+               /// </param>
+               /// <param name="fields">Fields to search on
+               /// </param>
+               /// <param name="flags">Flags describing the fields
+               /// </param>
+               /// <param name="analyzer">Analyzer to use
+               /// </param>
+               /// <throws>  ParseException if query parsing fails </throws>
+               /// <throws>  IllegalArgumentException if the length of the fields array differs </throws>
+               /// <summary>  from the length of the flags array
+               /// </summary>
+               /// <deprecated> Use
+               /// {@link #Parse(Version, String, String[], BooleanClause.Occur[], Analyzer)}
+               /// instead
+               /// </deprecated>
+        [Obsolete("Use Parse(Version, String, String[], BooleanClause.Occur[], Analyzer) instead")]
+               public static Query Parse(System.String query, System.String[] fields, BooleanClause.Occur[] flags, Analyzer analyzer)
+               {
+                       return Parse(Version.LUCENE_24, query, fields, flags, analyzer);
+               }
+               
+               /// <summary> Parses a query, searching on the fields specified. Use this if you need
+               /// to specify certain fields as required, and others as prohibited.
+               /// <p/>
+               /// 
+               /// <pre>
+               /// Usage:
+               /// &lt;code&gt;
+               /// String[] fields = {&quot;filename&quot;, &quot;contents&quot;, &quot;description&quot;};
+               /// BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
+               /// BooleanClause.Occur.MUST,
+               /// BooleanClause.Occur.MUST_NOT};
+               /// MultiFieldQueryParser.parse(&quot;query&quot;, fields, flags, analyzer);
+               /// &lt;/code&gt;
+               /// </pre>
+               /// <p/>
+               /// The code above would construct a query:
+               /// 
+               /// <pre>
+               /// &lt;code&gt;
+               /// (filename:query) +(contents:query) -(description:query)
+               /// &lt;/code&gt;
+               /// </pre>
+               /// 
+               /// </summary>
+               /// <param name="matchVersion">Lucene version to match; this is passed through to
+               /// QueryParser.
+               /// </param>
+               /// <param name="query">Query string to parse
+               /// </param>
+               /// <param name="fields">Fields to search on
+               /// </param>
+               /// <param name="flags">Flags describing the fields
+               /// </param>
+               /// <param name="analyzer">Analyzer to use
+               /// </param>
+               /// <throws>  ParseException </throws>
+               /// <summary>             if query parsing fails
+               /// </summary>
+               /// <throws>  IllegalArgumentException </throws>
+               /// <summary>             if the length of the fields array differs from the length of
+               /// the flags array
+               /// </summary>
+               public static Query Parse(Version matchVersion, System.String query, System.String[] fields, BooleanClause.Occur[] flags, Analyzer analyzer)
+               {
+                       if (fields.Length != flags.Length)
+                               throw new System.ArgumentException("fields.length != flags.length");
+                       BooleanQuery bQuery = new BooleanQuery();
+                       for (int i = 0; i < fields.Length; i++)
+                       {
+                               QueryParser qp = new QueryParser(matchVersion, fields[i], analyzer);
+                               Query q = qp.Parse(query);
+                               if (q != null && (!(q is BooleanQuery) || ((BooleanQuery) q).GetClauses().Length > 0))
+                               {
+                                       bQuery.Add(q, flags[i]);
+                               }
+                       }
+                       return bQuery;
+               }
+               
+               /// <summary> Parses a query, searching on the fields specified.
+               /// Use this if you need to specify certain fields as required,
+               /// and others as prohibited.
+               /// <p/><pre>
+               /// Usage:
+               /// <code>
+               /// String[] query = {"query1", "query2", "query3"};
+               /// String[] fields = {"filename", "contents", "description"};
+               /// BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
+               /// BooleanClause.Occur.MUST,
+               /// BooleanClause.Occur.MUST_NOT};
+               /// MultiFieldQueryParser.parse(query, fields, flags, analyzer);
+               /// </code>
+               /// </pre>
+               /// <p/>
+               /// The code above would construct a query:
+               /// <pre>
+               /// <code>
+               /// (filename:query1) +(contents:query2) -(description:query3)
+               /// </code>
+               /// </pre>
+               /// 
+               /// </summary>
+		/// <param name="queries">Query strings to parse
+               /// </param>
+               /// <param name="fields">Fields to search on
+               /// </param>
+               /// <param name="flags">Flags describing the fields
+               /// </param>
+               /// <param name="analyzer">Analyzer to use
+               /// </param>
+               /// <throws>  ParseException if query parsing fails </throws>
+               /// <throws>  IllegalArgumentException if the length of the queries, fields, </throws>
+               /// <summary>  and flags array differ
+               /// </summary>
+		/// <deprecated> Use
+               /// {@link #Parse(Version, String[], String[], BooleanClause.Occur[], Analyzer)}
+               /// instead
+               /// </deprecated>
+        [Obsolete("Use Parse(Version, String[], String[], BooleanClause.Occur[], Analyzer) instead")]
+               public static Query Parse(System.String[] queries, System.String[] fields, BooleanClause.Occur[] flags, Analyzer analyzer)
+               {
+                       return Parse(Version.LUCENE_24, queries, fields, flags, analyzer);
+               }
+               
+               /// <summary> Parses a query, searching on the fields specified. Use this if you need
+               /// to specify certain fields as required, and others as prohibited.
+               /// <p/>
+               /// 
+               /// <pre>
+               /// Usage:
+               /// &lt;code&gt;
+               /// String[] query = {&quot;query1&quot;, &quot;query2&quot;, &quot;query3&quot;};
+               /// String[] fields = {&quot;filename&quot;, &quot;contents&quot;, &quot;description&quot;};
+               /// BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
+               /// BooleanClause.Occur.MUST,
+               /// BooleanClause.Occur.MUST_NOT};
+               /// MultiFieldQueryParser.parse(query, fields, flags, analyzer);
+               /// &lt;/code&gt;
+               /// </pre>
+               /// <p/>
+               /// The code above would construct a query:
+               /// 
+               /// <pre>
+               /// &lt;code&gt;
+               /// (filename:query1) +(contents:query2) -(description:query3)
+               /// &lt;/code&gt;
+               /// </pre>
+               /// 
+               /// </summary>
+               /// <param name="matchVersion">Lucene version to match; this is passed through to
+               /// QueryParser.
+               /// </param>
+		/// <param name="queries">Query strings to parse
+               /// </param>
+               /// <param name="fields">Fields to search on
+               /// </param>
+               /// <param name="flags">Flags describing the fields
+               /// </param>
+               /// <param name="analyzer">Analyzer to use
+               /// </param>
+               /// <throws>  ParseException </throws>
+               /// <summary>             if query parsing fails
+               /// </summary>
+               /// <throws>  IllegalArgumentException </throws>
+               /// <summary>             if the length of the queries, fields, and flags array differ
+               /// </summary>
+               public static Query Parse(Version matchVersion, System.String[] queries, System.String[] fields, BooleanClause.Occur[] flags, Analyzer analyzer)
+               {
+                       if (!(queries.Length == fields.Length && queries.Length == flags.Length))
+                               throw new System.ArgumentException("queries, fields, and flags arrays must have the same length");
+                       BooleanQuery bQuery = new BooleanQuery();
+                       for (int i = 0; i < fields.Length; i++)
+                       {
+                               QueryParser qp = new QueryParser(matchVersion, fields[i], analyzer);
+                               Query q = qp.Parse(queries[i]);
+                               if (q != null && (!(q is BooleanQuery) || ((BooleanQuery) q).GetClauses().Length > 0))
+                               {
+                                       bQuery.Add(q, flags[i]);
+                               }
+                       }
+                       return bQuery;
+               }
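+               
+               // Example (an illustrative sketch, not upstream documentation): because
+               // clauses whose sub-query parses to an empty BooleanQuery are skipped
+               // above, a query string made entirely of stop words contributes no
+               // clause. Assuming an Analyzer `analyzer` that drops "the" as a stop word:
+               //
+               //   Query q = Parse(Version.LUCENE_24,
+               //                   new string[] { "the", "runtime" },
+               //                   new string[] { "title", "contents" },
+               //                   new BooleanClause.Occur[] { BooleanClause.Occur.MUST,
+               //                                               BooleanClause.Occur.MUST },
+               //                   analyzer);
+               //   // q prints as: +contents:runtime   (the "title" clause was dropped)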
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/Package.html b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/Package.html
new file mode 100644 (file)
index 0000000..a12d2b9
--- /dev/null
@@ -0,0 +1,35 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">\r
+<!--\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements.  See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License.  You may obtain a copy of the License at\r
+\r
+     http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+-->\r
+<html>\r
+<head>\r
+   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">\r
+</head>\r
+<body>\r
+\r
+A simple query parser implemented with JavaCC.\r
+<p>Note that JavaCC defines lots of public classes, methods and fields\r
+that do not need to be public.&nbsp; These clutter the documentation.&nbsp;\r
+Sorry.\r
+<p>Note that because JavaCC defines a class named <tt>Token</tt>, <tt>Lucene.Net.Analysis.Token</tt>\r
+must always be fully qualified in source code in this package.\r
+\r
+<p><b>NOTE</b>: contrib/queryparser has an alternative queryparser that matches the syntax of this one, but is more modular,\r
+enabling substantial customization to how a query is created.\r
+\r
+</body>\r
+</html>\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/ParseException.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/ParseException.cs
new file mode 100644 (file)
index 0000000..9873f3d
--- /dev/null
@@ -0,0 +1,243 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Generated By:JavaCC: Do not edit this line. ParseException.java Version 4.1 */
+/* JavaCCOptions:KEEP_LINE_COL=null */
+
+using System;
+
+namespace Mono.Lucene.Net.QueryParsers
+{
+       
+       /// <summary> This exception is thrown when parse errors are encountered.
+       /// You can explicitly create objects of this exception type by
+       /// calling the method generateParseException in the generated
+       /// parser.
+       /// 
+       /// You can modify this class to customize your error reporting
+       /// mechanisms so long as you retain the public fields.
+       /// </summary>
+       [Serializable]
+       public class ParseException:System.Exception
+       {
+               /// <summary> This method has the standard behavior when this object has been
+               /// created using the standard constructors.  Otherwise, it uses
+               /// "currentToken" and "expectedTokenSequences" to generate a parse
+               /// error message and returns it.  If this object has been created
+               /// due to a parse error, and you do not catch it (it gets thrown
+               /// from the parser), then this method is called during the printing
+               /// of the final stack trace, and hence the correct error message
+               /// gets displayed.
+               /// </summary>
+               public override System.String Message
+               {
+                       get
+                       {
+                               if (!specialConstructor)
+                               {
+                                       return base.Message;
+                               }
+                               System.Text.StringBuilder expected = new System.Text.StringBuilder();
+                               int maxSize = 0;
+                               for (int i = 0; i < expectedTokenSequences.Length; i++)
+                               {
+                                       if (maxSize < expectedTokenSequences[i].Length)
+                                       {
+                                               maxSize = expectedTokenSequences[i].Length;
+                                       }
+                                       for (int j = 0; j < expectedTokenSequences[i].Length; j++)
+                                       {
+                                               expected.Append(tokenImage[expectedTokenSequences[i][j]]).Append(' ');
+                                       }
+                                       if (expectedTokenSequences[i][expectedTokenSequences[i].Length - 1] != 0)
+                                       {
+                                               expected.Append("...");
+                                       }
+                                       expected.Append(eol).Append("    ");
+                               }
+                               System.String retval = "Encountered \"";
+                               Token tok = currentToken.next;
+                               for (int i = 0; i < maxSize; i++)
+                               {
+                                       if (i != 0)
+                                               retval += " ";
+                                       if (tok.kind == 0)
+                                       {
+                                               retval += tokenImage[0];
+                                               break;
+                                       }
+                                       retval += (" " + tokenImage[tok.kind]);
+                                       retval += " \"";
+                                       retval += Add_escapes(tok.image);
+                                       retval += " \"";
+                                       tok = tok.next;
+                               }
+                               retval += ("\" at line " + currentToken.next.beginLine + ", column " + currentToken.next.beginColumn);
+                               retval += ("." + eol);
+                               if (expectedTokenSequences.Length == 1)
+                               {
+                                       retval += ("Was expecting:" + eol + "    ");
+                               }
+                               else
+                               {
+                                       retval += ("Was expecting one of:" + eol + "    ");
+                               }
+                               retval += expected.ToString();
+                               return retval;
+                       }
+                       
+               }
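+               
+               // The generated text has roughly this shape (illustrative only; the
+               // actual token names come from the generated tokenImage table):
+               //
+               //   Encountered "<token> ..." at line L, column C.
+               //   Was expecting one of:
+               //       <TERM> ...
+               //       "(" ...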
+               
+               /// <summary> This constructor is used by the method "generateParseException"
+               /// in the generated parser.  Calling this constructor generates
+               /// a new object of this type with the fields "currentToken",
+               /// "expectedTokenSequences", and "tokenImage" set.  The boolean
+               /// flag "specialConstructor" is also set to true to indicate that
+               /// this constructor was used to create this object.
+               /// This constructor calls its super class with the empty string
+               /// to force the "toString" method of parent class "Throwable" to
+               /// print the error message in the form:
+        /// ParseException: &lt;result of getMessage&gt;
+               /// </summary>
+               public ParseException(Token currentTokenVal, int[][] expectedTokenSequencesVal, System.String[] tokenImageVal):base("")
+               {
+                       specialConstructor = true;
+                       currentToken = currentTokenVal;
+                       expectedTokenSequences = expectedTokenSequencesVal;
+                       tokenImage = tokenImageVal;
+               }
+               
+               /// <summary> The following constructors are for use by you for whatever
+               /// purpose you can think of.  Constructing the exception in this
+               /// manner makes the exception behave in the normal way - i.e., as
+               /// documented in the class "Throwable".  The fields "currentToken",
+               /// "expectedTokenSequences", and "tokenImage" do not contain
+               /// relevant information.  The JavaCC generated code does not use
+               /// these constructors.
+               /// </summary>
+               
+               public ParseException():base()
+               {
+                       specialConstructor = false;
+               }
+               
+               /// <summary>Constructor with message. </summary>
+               public ParseException(System.String message):base(message)
+               {
+                       specialConstructor = false;
+               }
+               
+        /// <summary>Constructor with message. </summary>
+        public ParseException(System.String message, System.Exception ex) : base(message, ex)
+        {
+               specialConstructor = false;
+        }
+               
+               /// <summary> This variable determines which constructor was used to create
+               /// this object and thereby affects the semantics of the
+               /// "getMessage" method (see below).
+               /// </summary>
+               protected internal bool specialConstructor;
+               
+               /// <summary> This is the last token that has been consumed successfully.  If
+               /// this object has been created due to a parse error, the token
+               /// following this token will (therefore) be the first error token.
+               /// </summary>
+               public Token currentToken;
+               
+               /// <summary> Each entry in this array is an array of integers.  Each array
+               /// of integers represents a sequence of tokens (by their ordinal
+               /// values) that is expected at this point of the parse.
+               /// </summary>
+               public int[][] expectedTokenSequences;
+               
+               /// <summary> This is a reference to the "tokenImage" array of the generated
+               /// parser within which the parse error occurred.  This array is
+               /// defined in the generated ...Constants interface.
+               /// </summary>
+               public System.String[] tokenImage;
+               
+               /// <summary> The end of line string for this machine.</summary>
+               protected internal System.String eol = SupportClass.AppSettings.Get("line.separator", "\n");
+               
+               /// <summary> Used to convert raw characters to their escaped versions
+               /// when the raw versions cannot be used as part of an ASCII
+               /// string literal.
+               /// </summary>
+               protected internal virtual System.String Add_escapes(System.String str)
+               {
+                       System.Text.StringBuilder retval = new System.Text.StringBuilder();
+                       char ch;
+                       for (int i = 0; i < str.Length; i++)
+                       {
+                               switch (str[i])
+                               {
+                                       
+                                       case (char) (0): 
+                                               continue;
+                                       
+                                       case '\b': 
+                                               retval.Append("\\b");
+                                               continue;
+                                       
+                                       case '\t': 
+                                               retval.Append("\\t");
+                                               continue;
+                                       
+                                       case '\n': 
+                                               retval.Append("\\n");
+                                               continue;
+                                       
+                                       case '\f': 
+                                               retval.Append("\\f");
+                                               continue;
+                                       
+                                       case '\r': 
+                                               retval.Append("\\r");
+                                               continue;
+                                       
+                                       case '\"': 
+                                               retval.Append("\\\"");
+                                               continue;
+                                       
+                                       case '\'': 
+                                               retval.Append("\\\'");
+                                               continue;
+                                       
+                                       case '\\': 
+                                               retval.Append("\\\\");
+                                               continue;
+                                       
+                                       default: 
+                                               if ((ch = str[i]) < 0x20 || ch > 0x7e)
+                                               {
+                                                       System.String s = "0000" + System.Convert.ToString(ch, 16);
+                                                       retval.Append("\\u" + s.Substring(s.Length - 4));
+                                               }
+                                               else
+                                               {
+                                                       retval.Append(ch);
+                                               }
+                                               continue;
+                                       
+                               }
+                       }
+                       return retval.ToString();
+               }
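+               
+               // Examples (illustrative):
+               //   Add_escapes("a\tb")       -> a\tb     (the tab becomes the two characters \t)
+               //   Add_escapes("say \"hi\"") -> say \"hi\"
+               //   Add_escapes("\u0001")     -> \u0001   (non-printable -> \uXXXX escape)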
+       }
+       /* JavaCC - OriginalChecksum=c63b396885c4ff44d7aa48d3feae60cd (do not edit this line) */
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/QueryParser.JJ b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/QueryParser.JJ
new file mode 100644 (file)
index 0000000..91d607d
--- /dev/null
@@ -0,0 +1,1468 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+options {
+  STATIC=false;
+  JAVA_UNICODE_ESCAPE=true;
+  USER_CHAR_STREAM=true;
+}
+
+PARSER_BEGIN(QueryParser)
+
+package org.apache.lucene.queryParser;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.text.Collator;
+import java.text.DateFormat;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Vector;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CachingTokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.document.DateField;
+import org.apache.lucene.document.DateTools;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.FuzzyQuery;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MultiPhraseQuery;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.PrefixQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermRangeQuery;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.WildcardQuery;
+import org.apache.lucene.util.Parameter;
+import org.apache.lucene.util.Version;
+
+/**
+ * This class is generated by JavaCC.  The most important method is
+ * {@link #parse(String)}.
+ *
+ * The syntax for query strings is as follows:
+ * A Query is a series of clauses.
+ * A clause may be prefixed by:
+ * <ul>
+ * <li> a plus (<code>+</code>) or a minus (<code>-</code>) sign, indicating
+ * that the clause is required or prohibited respectively; or
+ * <li> a term followed by a colon, indicating the field to be searched.
+ * This enables one to construct queries which search multiple fields.
+ * </ul>
+ *
+ * A clause may be either:
+ * <ul>
+ * <li> a term, indicating all the documents that contain this term; or
+ * <li> a nested query, enclosed in parentheses.  Note that this may be used
+ * with a <code>+</code>/<code>-</code> prefix to require any of a set of
+ * terms.
+ * </ul>
+ *
+ * Thus, in BNF, the query grammar is:
+ * <pre>
+ *   Query  ::= ( Clause )*
+ *   Clause ::= ["+", "-"] [&lt;TERM&gt; ":"] ( &lt;TERM&gt; | "(" Query ")" )
+ * </pre>
+ *
+ * <p>
+ * Examples of appropriately formatted queries can be found in the <a
+ * href="../../../../../../queryparsersyntax.html">query syntax
+ * documentation</a>.
+ * </p>
+ *
+ * <p>
+ * In {@link TermRangeQuery}s, QueryParser tries to detect date values, e.g.
+ * <tt>date:[6/1/2005 TO 6/4/2005]</tt> produces a range query that searches
+ * for "date" fields between 2005-06-01 and 2005-06-04. Note that the format
+ * of the accepted input depends on {@link #setLocale(Locale) the locale}.
+ * By default a date is converted into a search term using the deprecated
+ * {@link DateField} for compatibility reasons.
+ * To use the new {@link DateTools} to convert dates, a
+ * {@link org.apache.lucene.document.DateTools.Resolution} has to be set.
+ * </p>
+ * <p>
+ * The date resolution that shall be used for RangeQueries can be set
+ * using {@link #setDateResolution(DateTools.Resolution)}
+ * or {@link #setDateResolution(String, DateTools.Resolution)}. The former
+ * sets the default date resolution for all fields, whereas the latter can
+ * be used to set field specific date resolutions. Field specific date
+ * resolutions take, if set, precedence over the default date resolution.
+ * </p>
+ * <p>
+ * If you use neither {@link DateField} nor {@link DateTools} in your
+ * index, you can create your own
+ * query parser that inherits QueryParser and overwrites
+ * {@link #getRangeQuery(String, String, String, boolean)} to
+ * use a different method for date conversion.
+ * </p>
+ *
+ * <p>Note that QueryParser is <em>not</em> thread-safe.</p> 
+ * 
+ * <p><b>NOTE</b>: there is a new QueryParser in contrib, which matches
+ * the same syntax as this class, but is more modular,
+ * enabling substantial customization to how a query is created.
+ *
+ * <a name="version"/>
+ * <p><b>NOTE</b>: You must specify the required {@link Version}
+ * compatibility when creating QueryParser:
+ * <ul>
+ *    <li> As of 2.9, {@link #setEnablePositionIncrements} is true by
+ *         default.
+ * </ul>
+ */
+public class QueryParser {
+
+  private static final int CONJ_NONE   = 0;
+  private static final int CONJ_AND    = 1;
+  private static final int CONJ_OR     = 2;
+
+  private static final int MOD_NONE    = 0;
+  private static final int MOD_NOT     = 10;
+  private static final int MOD_REQ     = 11;
+
+  // make it possible to call setDefaultOperator() without accessing 
+  // the nested class:
+  /** Alternative form of QueryParser.Operator.AND */
+  public static final Operator AND_OPERATOR = Operator.AND;
+  /** Alternative form of QueryParser.Operator.OR */
+  public static final Operator OR_OPERATOR = Operator.OR;
+
+  /** The actual operator that the parser uses to combine query terms */
+  private Operator operator = OR_OPERATOR;
+
+  boolean lowercaseExpandedTerms = true;
+  MultiTermQuery.RewriteMethod multiTermRewriteMethod = MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
+  boolean allowLeadingWildcard = false;
+  boolean enablePositionIncrements = true;
+
+  Analyzer analyzer;
+  String field;
+  int phraseSlop = 0;
+  float fuzzyMinSim = FuzzyQuery.defaultMinSimilarity;
+  int fuzzyPrefixLength = FuzzyQuery.defaultPrefixLength;
+  Locale locale = Locale.getDefault();
+
+  // the default date resolution
+  DateTools.Resolution dateResolution = null;
+  // maps field names to date resolutions
+  Map fieldToDateResolution = null;
+
+  // The collator to use when determining range inclusion,
+  // for use when constructing RangeQuerys.
+  Collator rangeCollator = null;
+
+  /** The default operator for parsing queries. 
+   * Use {@link QueryParser#setDefaultOperator} to change it.
+   */
+  static public final class Operator extends Parameter {
+    private Operator(String name) {
+      super(name);
+    }
+    static public final Operator OR = new Operator("OR");
+    static public final Operator AND = new Operator("AND");
+  }
+
+
+  /** Constructs a query parser.
+   *  @param f  the default field for query terms.
+   *  @param a   used to find terms in the query text.
+   *  @deprecated Use {@link #QueryParser(Version, String, Analyzer)} instead
+   */
+  public QueryParser(String f, Analyzer a) {
+    this(Version.LUCENE_24, f, a);
+  }
+
+  /** Constructs a query parser.
+   *  @param matchVersion  Lucene version to match.  See <a href="#version">above</a>.
+   *  @param f  the default field for query terms.
+   *  @param a   used to find terms in the query text.
+   */
+  public QueryParser(Version matchVersion, String f, Analyzer a) {
+    this(new FastCharStream(new StringReader("")));
+    analyzer = a;
+    field = f;
+    if (matchVersion.onOrAfter(Version.LUCENE_29)) {
+      enablePositionIncrements = true;
+    } else {
+      enablePositionIncrements = false;
+    }
+  }
+
+  /** Parses a query string, returning a {@link org.apache.lucene.search.Query}.
+   *  @param query  the query string to be parsed.
+   *  @throws ParseException if the parsing fails
+   */
+  public Query parse(String query) throws ParseException {
+    ReInit(new FastCharStream(new StringReader(query)));
+    try {
+      // TopLevelQuery is a Query followed by the end-of-input (EOF)
+      Query res = TopLevelQuery(field);
+      return res!=null ? res : newBooleanQuery(false);
+    }
+    catch (ParseException tme) {
+      // rethrow to include the original query:
+      ParseException e = new ParseException("Cannot parse '" +query+ "': " + tme.getMessage());
+      e.initCause(tme);
+      throw e;
+    }
+    catch (TokenMgrError tme) {
+      ParseException e = new ParseException("Cannot parse '" +query+ "': " + tme.getMessage());
+      e.initCause(tme);
+      throw e;
+    }
+    catch (BooleanQuery.TooManyClauses tmc) {
+      ParseException e = new ParseException("Cannot parse '" +query+ "': too many boolean clauses");
+      e.initCause(tmc);
+      throw e;
+    }
+  }
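+
+  // Example (illustrative; the field name and analyzer are placeholders):
+  //
+  //   QueryParser qp = new QueryParser(Version.LUCENE_24, "contents",
+  //                                    new org.apache.lucene.analysis.SimpleAnalyzer());
+  //   Query q = qp.parse("+mono -java \"query parser\"~2");
+  //   // q.toString("contents") => +mono -java "query parser"~2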
+
+   /**
+   * @return Returns the analyzer.
+   */
+  public Analyzer getAnalyzer() {
+    return analyzer;
+  }
+
+  /**
+   * @return Returns the field.
+   */
+  public String getField() {
+    return field;
+  }
+
+   /**
+   * Get the minimum similarity for fuzzy queries.
+   */
+  public float getFuzzyMinSim() {
+      return fuzzyMinSim;
+  }
+
+  /**
+   * Set the minimum similarity for fuzzy queries.
+   * Default is 0.5f.
+   */
+  public void setFuzzyMinSim(float fuzzyMinSim) {
+      this.fuzzyMinSim = fuzzyMinSim;
+  }
+
+   /**
+   * Get the prefix length for fuzzy queries. 
+   * @return Returns the fuzzyPrefixLength.
+   */
+  public int getFuzzyPrefixLength() {
+    return fuzzyPrefixLength;
+  }
+
+  /**
+   * Set the prefix length for fuzzy queries. Default is 0.
+   * @param fuzzyPrefixLength The fuzzyPrefixLength to set.
+   */
+  public void setFuzzyPrefixLength(int fuzzyPrefixLength) {
+    this.fuzzyPrefixLength = fuzzyPrefixLength;
+  }
+
+  /**
+   * Sets the default slop for phrases.  If zero, then exact phrase matches
+   * are required.  Default value is zero.
+   */
+  public void setPhraseSlop(int phraseSlop) {
+    this.phraseSlop = phraseSlop;
+  }
+
+  /**
+   * Gets the default slop for phrases.
+   */
+  public int getPhraseSlop() {
+    return phraseSlop;
+  }
+
+
+  /**
+   * Set to <code>true</code> to allow leading wildcard characters.
+   * <p>
+   * When set, <code>*</code> or <code>?</code> are allowed as 
+   * the first character of a PrefixQuery and WildcardQuery.
+   * Note that this can produce very slow
+   * queries on big indexes. 
+   * <p>
+   * Default: false.
+   */
+  public void setAllowLeadingWildcard(boolean allowLeadingWildcard) {
+    this.allowLeadingWildcard = allowLeadingWildcard;
+  }
+
+  /**
+   * @see #setAllowLeadingWildcard(boolean)
+   */
+  public boolean getAllowLeadingWildcard() {
+    return allowLeadingWildcard;
+  }
+
+  /**
+   * Set to <code>true</code> to enable position increments in result query.
+   * <p>
+   * When set, result phrase and multi-phrase queries will
+   * be aware of position increments.
+   * Useful when e.g. a StopFilter increases the position increment of
+   * the token that follows an omitted token.
+   * <p>
+   * Default: true as of Lucene 2.9 (see the <a href="#version">version note</a> above); false for earlier versions.
+   */
+  public void setEnablePositionIncrements(boolean enable) {
+    this.enablePositionIncrements = enable;
+  }
+
+  /**
+   * @see #setEnablePositionIncrements(boolean)
+   */
+  public boolean getEnablePositionIncrements() {
+    return enablePositionIncrements;
+  }
+
+  /**
+   * Sets the boolean operator of the QueryParser.
+   * In default mode (<code>OR_OPERATOR</code>) terms without any modifiers
+   * are considered optional: for example <code>capital of Hungary</code> is equal to
+   * <code>capital OR of OR Hungary</code>.<br/>
+   * In <code>AND_OPERATOR</code> mode terms are considered to be in conjunction: the
+   * above mentioned query is parsed as <code>capital AND of AND Hungary</code>
+   */
+  public void setDefaultOperator(Operator op) {
+    this.operator = op;
+  }
+
+
+  /**
+   * Gets implicit operator setting, which will be either AND_OPERATOR
+   * or OR_OPERATOR.
+   */
+  public Operator getDefaultOperator() {
+    return operator;
+  }
+
+
+  /**
+   * Whether terms of wildcard, prefix, fuzzy and range queries are to be automatically
+   * lower-cased or not.  Default is <code>true</code>.
+   */
+  public void setLowercaseExpandedTerms(boolean lowercaseExpandedTerms) {
+    this.lowercaseExpandedTerms = lowercaseExpandedTerms;
+  }
+
+
+  /**
+   * @see #setLowercaseExpandedTerms(boolean)
+   */
+  public boolean getLowercaseExpandedTerms() {
+    return lowercaseExpandedTerms;
+  }
+
+  /**
+   * @deprecated Please use {@link #setMultiTermRewriteMethod} instead.
+   */
+  public void setUseOldRangeQuery(boolean useOldRangeQuery) {
+    if (useOldRangeQuery) {
+      setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
+    } else {
+      setMultiTermRewriteMethod(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT);
+    }
+  }
+
+
+  /**
+   * @deprecated Please use {@link #getMultiTermRewriteMethod} instead.
+   */
+  public boolean getUseOldRangeQuery() {
+    if (getMultiTermRewriteMethod() == MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE) {
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  /**
+   * By default QueryParser uses {@link MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}
+   * when creating a PrefixQuery, WildcardQuery or RangeQuery. This implementation is generally preferable because it
+   * (a) runs faster, (b) does not let the scarcity of terms unduly influence the score, and
+   * (c) avoids any "TooManyBooleanClauses" exception.
+   * However, if your application really needs to use the
+   * old-fashioned BooleanQuery expansion rewriting and the above
+   * points are not relevant then use this to change
+   * the rewrite method.
+   */
+  public void setMultiTermRewriteMethod(MultiTermQuery.RewriteMethod method) {
+    multiTermRewriteMethod = method;
+  }
+
+
+  /**
+   * @see #setMultiTermRewriteMethod
+   */
+  public MultiTermQuery.RewriteMethod getMultiTermRewriteMethod() {
+    return multiTermRewriteMethod;
+  }
+
+  /**
+   * Set locale used by date range parsing.
+   */
+  public void setLocale(Locale locale) {
+    this.locale = locale;
+  }
+
+  /**
+   * Returns current locale, allowing access by subclasses.
+   */
+  public Locale getLocale() {
+    return locale;
+  }
+
+  /**
+   * Sets the default date resolution used by RangeQueries for fields for which no
+   * field-specific date resolution has been set. Field-specific resolutions can be set
+   * with {@link #setDateResolution(String, DateTools.Resolution)}.
+   *  
+   * @param dateResolution the default date resolution to set
+   */
+  public void setDateResolution(DateTools.Resolution dateResolution) {
+    this.dateResolution = dateResolution;
+  }
+
+  /**
+   * Sets the date resolution used by RangeQueries for a specific field.
+   *  
+   * @param fieldName field for which the date resolution is to be set 
+   * @param dateResolution date resolution to set
+   */
+  public void setDateResolution(String fieldName, DateTools.Resolution dateResolution) {
+    if (fieldName == null) {
+      throw new IllegalArgumentException("Field cannot be null.");
+    }
+
+    if (fieldToDateResolution == null) {
+      // lazily initialize HashMap
+      fieldToDateResolution = new HashMap();
+    }
+
+    fieldToDateResolution.put(fieldName, dateResolution);
+  }
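+
+  // Example (illustrative): give the "created" field day resolution while all
+  // other fields fall back to an hour-resolution default:
+  //
+  //   qp.setDateResolution(DateTools.Resolution.HOUR);            // default for all fields
+  //   qp.setDateResolution("created", DateTools.Resolution.DAY);  // field-specific override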
+
+  /**
+   * Returns the date resolution that is used by RangeQueries for the given field. 
+   * Returns null if neither a default nor a field-specific date resolution
+   * has been set for the given field.
+   *
+   */
+  public DateTools.Resolution getDateResolution(String fieldName) {
+    if (fieldName == null) {
+      throw new IllegalArgumentException("Field cannot be null.");
+    }
+
+    if (fieldToDateResolution == null) {
+      // no field specific date resolutions set; return default date resolution instead
+      return this.dateResolution;
+    }
+
+    DateTools.Resolution resolution = (DateTools.Resolution) fieldToDateResolution.get(fieldName);
+    if (resolution == null) {
+      // no date resolutions set for the given field; return default date resolution instead
+      resolution = this.dateResolution;
+    }
+
+    return resolution;
+  }
+  
+  /** 
+   * Sets the collator used to determine index term inclusion in ranges
+   * for RangeQuerys.
+   * <p/>
+   * <strong>WARNING:</strong> Setting the rangeCollator to a non-null
+   * collator using this method will cause every single index Term in the
+   * Field referenced by lowerTerm and/or upperTerm to be examined.
+   * Depending on the number of index Terms in this Field, the operation could
+   * be very slow.
+   *
+   *  @param rc  the collator to use when constructing RangeQuerys
+   */
+  public void setRangeCollator(Collator rc) {
+    rangeCollator = rc;
+  }
+  
+  /**
+   * @return the collator used to determine index term inclusion in ranges
+   * for RangeQuerys.
+   */
+  public Collator getRangeCollator() {
+    return rangeCollator;
+  }
+
+  /**
+   * @deprecated use {@link #addClause(List, int, int, Query)} instead.
+   */
+  protected void addClause(Vector clauses, int conj, int mods, Query q) {
+    addClause((List) clauses, conj, mods, q);
+  }
+
+  protected void addClause(List clauses, int conj, int mods, Query q) {
+    boolean required, prohibited;
+
+    // If this term is introduced by AND, make the preceding term required,
+    // unless it's already prohibited
+    if (clauses.size() > 0 && conj == CONJ_AND) {
+      BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
+      if (!c.isProhibited())
+        c.setOccur(BooleanClause.Occur.MUST);
+    }
+
+    if (clauses.size() > 0 && operator == AND_OPERATOR && conj == CONJ_OR) {
+      // If this term is introduced by OR, make the preceding term optional,
+      // unless it's prohibited (that means we leave -a OR b but +a OR b --> a OR b);
+      // notice that if the input is a OR b, the first term is parsed as required; without
+      // this modification, a OR b would be parsed as +a OR b
+      BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
+      if (!c.isProhibited())
+        c.setOccur(BooleanClause.Occur.SHOULD);
+    }
+
+    // We might have been passed a null query; the term might have been
+    // filtered away by the analyzer.
+    if (q == null)
+      return;
+
+    if (operator == OR_OPERATOR) {
+      // We set REQUIRED if we're introduced by AND or +; PROHIBITED if
+      // introduced by NOT or -; make sure not to set both.
+      prohibited = (mods == MOD_NOT);
+      required = (mods == MOD_REQ);
+      if (conj == CONJ_AND && !prohibited) {
+        required = true;
+      }
+    } else {
+      // We set PROHIBITED if we're introduced by NOT or -; We set REQUIRED
+      // if not PROHIBITED and not introduced by OR
+      prohibited = (mods == MOD_NOT);
+      required   = (!prohibited && conj != CONJ_OR);
+    }
+    if (required && !prohibited)
+      clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST));
+    else if (!required && !prohibited)
+      clauses.add(newBooleanClause(q, BooleanClause.Occur.SHOULD));
+    else if (!required && prohibited)
+      clauses.add(newBooleanClause(q, BooleanClause.Occur.MUST_NOT));
+    else
+      throw new RuntimeException("Clause cannot be both required and prohibited");
+  }
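+
+  // For reference (illustrative, with the default OR_OPERATOR):
+  //
+  //   input    conj       mods      resulting occur
+  //   a        CONJ_NONE  MOD_NONE  SHOULD
+  //   +a       CONJ_NONE  MOD_REQ   MUST
+  //   -a       CONJ_NONE  MOD_NOT   MUST_NOT
+  //   a AND b  CONJ_AND   MOD_NONE  b is MUST, and a is upgraded to MUST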
+
+
+  /**
+   * @exception ParseException throw in overridden method to disallow
+   */
+  protected Query getFieldQuery(String field, String queryText)  throws ParseException {
+    // Use the analyzer to get all the tokens, and then build a TermQuery,
+    // PhraseQuery, or nothing based on the term count
+
+    TokenStream source;
+    try {
+      source = analyzer.reusableTokenStream(field, new StringReader(queryText));
+      source.reset();
+    } catch (IOException e) {
+      source = analyzer.tokenStream(field, new StringReader(queryText));
+    }
+    CachingTokenFilter buffer = new CachingTokenFilter(source);
+    TermAttribute termAtt = null;
+    PositionIncrementAttribute posIncrAtt = null;
+    int numTokens = 0;
+
+    boolean success = false;
+    try {
+      buffer.reset();
+      success = true;
+    } catch (IOException e) {
+      // success==false if we hit an exception
+    }
+    if (success) {
+      if (buffer.hasAttribute(TermAttribute.class)) {
+        termAtt = (TermAttribute) buffer.getAttribute(TermAttribute.class);
+      }
+      if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
+        posIncrAtt = (PositionIncrementAttribute) buffer.getAttribute(PositionIncrementAttribute.class);
+      }
+    }
+
+    int positionCount = 0;
+    boolean severalTokensAtSamePosition = false;
+
+    boolean hasMoreTokens = false;
+    if (termAtt != null) {
+      try {
+        hasMoreTokens = buffer.incrementToken();
+        while (hasMoreTokens) {
+          numTokens++;
+          int positionIncrement = (posIncrAtt != null) ? posIncrAtt.getPositionIncrement() : 1;
+          if (positionIncrement != 0) {
+            positionCount += positionIncrement;
+          } else {
+            severalTokensAtSamePosition = true;
+          }
+          hasMoreTokens = buffer.incrementToken();
+        }
+      } catch (IOException e) {
+        // ignore
+      }
+    }
+    try {
+      // rewind the buffer stream
+      buffer.reset();
+
+      // close original stream - all tokens buffered
+      source.close();
+    }
+    catch (IOException e) {
+      // ignore
+    }
+
+    if (numTokens == 0)
+      return null;
+    else if (numTokens == 1) {
+      String term = null;
+      try {
+        boolean hasNext = buffer.incrementToken();
+        assert hasNext == true;
+        term = termAtt.term();
+      } catch (IOException e) {
+        // safe to ignore, because we know the number of tokens
+      }
+      return newTermQuery(new Term(field, term));
+    } else {
+      if (severalTokensAtSamePosition) {
+        if (positionCount == 1) {
+          // no phrase query:
+          BooleanQuery q = newBooleanQuery(true);
+          for (int i = 0; i < numTokens; i++) {
+            String term = null;
+            try {
+              boolean hasNext = buffer.incrementToken();
+              assert hasNext == true;
+              term = termAtt.term();
+            } catch (IOException e) {
+              // safe to ignore, because we know the number of tokens
+            }
+
+            Query currentQuery = newTermQuery(
+                new Term(field, term));
+            q.add(currentQuery, BooleanClause.Occur.SHOULD);
+          }
+          return q;
+        }
+        else {
+          // phrase query:
+          MultiPhraseQuery mpq = newMultiPhraseQuery();
+          mpq.setSlop(phraseSlop);
+          List multiTerms = new ArrayList();
+          int position = -1;
+          for (int i = 0; i < numTokens; i++) {
+            String term = null;
+            int positionIncrement = 1;
+            try {
+              boolean hasNext = buffer.incrementToken();
+              assert hasNext == true;
+              term = termAtt.term();
+              if (posIncrAtt != null) {
+                positionIncrement = posIncrAtt.getPositionIncrement();
+              }
+            } catch (IOException e) {
+              // safe to ignore, because we know the number of tokens
+            }
+
+            if (positionIncrement > 0 && multiTerms.size() > 0) {
+              if (enablePositionIncrements) {
+                mpq.add((Term[])multiTerms.toArray(new Term[0]),position);
+              } else {
+                mpq.add((Term[])multiTerms.toArray(new Term[0]));
+              }
+              multiTerms.clear();
+            }
+            position += positionIncrement;
+            multiTerms.add(new Term(field, term));
+          }
+          if (enablePositionIncrements) {
+            mpq.add((Term[])multiTerms.toArray(new Term[0]),position);
+          } else {
+            mpq.add((Term[])multiTerms.toArray(new Term[0]));
+          }
+          return mpq;
+        }
+      }
+      else {
+        PhraseQuery pq = newPhraseQuery();
+        pq.setSlop(phraseSlop);
+        int position = -1;
+
+
+        for (int i = 0; i < numTokens; i++) {
+          String term = null;
+          int positionIncrement = 1;
+
+          try {
+            boolean hasNext = buffer.incrementToken();
+            assert hasNext == true;
+            term = termAtt.term();
+            if (posIncrAtt != null) {
+              positionIncrement = posIncrAtt.getPositionIncrement();
+            }
+          } catch (IOException e) {
+            // safe to ignore, because we know the number of tokens
+          }
+
+          if (enablePositionIncrements) {
+            position += positionIncrement;
+            pq.add(new Term(field, term),position);
+          } else {
+            pq.add(new Term(field, term));
+          }
+        }
+        return pq;
+      }
+    }
+  }
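+
+  // Summary of outcomes (illustrative, assuming a plain lowercasing analyzer):
+  //   no tokens       -> null (e.g. the text was all stop words)
+  //   one token       -> TermQuery
+  //   several tokens  -> PhraseQuery (honoring phraseSlop), or a MultiPhraseQuery /
+  //                      BooleanQuery of SHOULD terms when the analyzer stacks
+  //                      several tokens at the same position (e.g. synonyms)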
+
+
+
+  /**
+   * Base implementation delegates to {@link #getFieldQuery(String,String)}.
+   * This method may be overridden, for example, to return
+   * a SpanNearQuery instead of a PhraseQuery.
+   *
+   * @exception ParseException throw in overridden method to disallow
+   */
+  protected Query getFieldQuery(String field, String queryText, int slop)
+        throws ParseException {
+    Query query = getFieldQuery(field, queryText);
+
+    if (query instanceof PhraseQuery) {
+      ((PhraseQuery) query).setSlop(slop);
+    }
+    if (query instanceof MultiPhraseQuery) {
+      ((MultiPhraseQuery) query).setSlop(slop);
+    }
+
+    return query;
+  }
+
+
+  /**
+   * @exception ParseException throw in overridden method to disallow
+   */
+  protected Query getRangeQuery(String field,
+                                String part1,
+                                String part2,
+                                boolean inclusive) throws ParseException
+  {
+    if (lowercaseExpandedTerms) {
+      part1 = part1.toLowerCase();
+      part2 = part2.toLowerCase();
+    }
+    try {
+      DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT, locale);
+      df.setLenient(true);
+      Date d1 = df.parse(part1);
+      Date d2 = df.parse(part2);
+      if (inclusive) {
+        // The user can only specify the date, not the time, so make sure
+        // the time is set to the latest possible time of that date to really
+        // include all documents:
+        Calendar cal = Calendar.getInstance(locale);
+        cal.setTime(d2);
+        cal.set(Calendar.HOUR_OF_DAY, 23);
+        cal.set(Calendar.MINUTE, 59);
+        cal.set(Calendar.SECOND, 59);
+        cal.set(Calendar.MILLISECOND, 999);
+        d2 = cal.getTime();
+      }
+      DateTools.Resolution resolution = getDateResolution(field);
+      if (resolution == null) {
+        // no default or field specific date resolution has been set,
+        // use deprecated DateField to maintain compatibility with
+        // pre-1.9 Lucene versions.
+        part1 = DateField.dateToString(d1);
+        part2 = DateField.dateToString(d2);
+      } else {
+        part1 = DateTools.dateToString(d1, resolution);
+        part2 = DateTools.dateToString(d2, resolution);
+      }
+    }
+    catch (Exception e) { }
+
+    return newRangeQuery(field, part1, part2, inclusive);
+  }
+
+ /**
+  * Builds a new BooleanQuery instance
+  * @param disableCoord disable coord
+  * @return new BooleanQuery instance
+  */
+  protected BooleanQuery newBooleanQuery(boolean disableCoord) {
+    return new BooleanQuery(disableCoord); 
+  }
+
+ /**
+  * Builds a new BooleanClause instance
+  * @param q sub query
+  * @param occur how this clause should occur when matching documents
+  * @return new BooleanClause instance
+  */
+  protected BooleanClause newBooleanClause(Query q, BooleanClause.Occur occur) {
+    return new BooleanClause(q, occur);
+  }
+  /**
+   * Builds a new TermQuery instance
+   * @param term term
+   * @return new TermQuery instance
+   */
+  protected Query newTermQuery(Term term){
+    return new TermQuery(term);
+  }
+  /**
+   * Builds a new PhraseQuery instance
+   * @return new PhraseQuery instance
+   */
+  protected PhraseQuery newPhraseQuery(){
+    return new PhraseQuery();
+  }
+  /**
+   * Builds a new MultiPhraseQuery instance
+   * @return new MultiPhraseQuery instance
+   */
+  protected MultiPhraseQuery newMultiPhraseQuery(){
+    return new MultiPhraseQuery();
+  }
+  /**
+   * Builds a new PrefixQuery instance
+   * @param prefix Prefix term
+   * @return new PrefixQuery instance
+   */
+  protected Query newPrefixQuery(Term prefix){
+    PrefixQuery query = new PrefixQuery(prefix);
+    query.setRewriteMethod(multiTermRewriteMethod);
+    return query;
+  }
+  /**
+   * Builds a new FuzzyQuery instance
+   * @param term Term
+   * @param minimumSimilarity minimum similarity
+   * @param prefixLength prefix length
+   * @return new FuzzyQuery Instance
+   */
+  protected Query newFuzzyQuery(Term term, float minimumSimilarity, int prefixLength) {
+    // FuzzyQuery doesn't yet allow constant score rewrite
+    return new FuzzyQuery(term,minimumSimilarity,prefixLength);
+  }
+
+  /**
+   * Builds a new TermRangeQuery instance
+   * @param field Field
+   * @param part1 min
+   * @param part2 max
+   * @param inclusive true if range is inclusive
+   * @return new TermRangeQuery instance
+   */
+  protected Query newRangeQuery(String field, String part1, String part2, boolean inclusive) {
+    final TermRangeQuery query = new TermRangeQuery(field, part1, part2, inclusive, inclusive, rangeCollator);
+    query.setRewriteMethod(multiTermRewriteMethod);
+    return query;
+  }
+  
+  /**
+   * Builds a new MatchAllDocsQuery instance
+   * @return new MatchAllDocsQuery instance
+   */
+  protected Query newMatchAllDocsQuery() {
+    return new MatchAllDocsQuery(); 
+  }
+
+  /**
+   * Builds a new WildcardQuery instance
+   * @param t wildcard term
+   * @return new WildcardQuery instance
+   */
+  protected Query newWildcardQuery(Term t) {
+    WildcardQuery query = new WildcardQuery(t);
+    query.setRewriteMethod(multiTermRewriteMethod);
+    return query;
+  }
+
+  /**
+   * Factory method for generating query, given a set of clauses.
+   * By default creates a boolean query composed of clauses passed in.
+   *
+   * Can be overridden by extending classes, to modify query being
+   * returned.
+   *
+   * @param clauses List that contains {@link BooleanClause} instances
+   *    to join.
+   *
+   * @return Resulting {@link Query} object.
+   * @exception ParseException throw in overridden method to disallow
+   * @deprecated use {@link #getBooleanQuery(List)} instead
+   */
+  protected Query getBooleanQuery(Vector clauses) throws ParseException {
+    return getBooleanQuery((List) clauses, false);
+  }
+
+  /**
+   * Factory method for generating query, given a set of clauses.
+   * By default creates a boolean query composed of clauses passed in.
+   *
+   * Can be overridden by extending classes, to modify query being
+   * returned.
+   *
+   * @param clauses List that contains {@link BooleanClause} instances
+   *    to join.
+   *
+   * @return Resulting {@link Query} object.
+   * @exception ParseException throw in overridden method to disallow
+   */
+  protected Query getBooleanQuery(List clauses) throws ParseException {
+    return getBooleanQuery(clauses, false);
+  }
+
+  /**
+   * Factory method for generating query, given a set of clauses.
+   * By default creates a boolean query composed of clauses passed in.
+   *
+   * Can be overridden by extending classes, to modify query being
+   * returned.
+   *
+   * @param clauses List that contains {@link BooleanClause} instances
+   *    to join.
+   * @param disableCoord true if coord scoring should be disabled.
+   *
+   * @return Resulting {@link Query} object.
+   * @exception ParseException throw in overridden method to disallow
+   * @deprecated use {@link #getBooleanQuery(List, boolean)} instead
+   */
+  protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
+    throws ParseException
+  {
+    return getBooleanQuery((List) clauses, disableCoord);
+  }
+
+  /**
+   * Factory method for generating query, given a set of clauses.
+   * By default creates a boolean query composed of clauses passed in.
+   *
+   * Can be overridden by extending classes, to modify query being
+   * returned.
+   *
+   * @param clauses List that contains {@link BooleanClause} instances
+   *    to join.
+   * @param disableCoord true if coord scoring should be disabled.
+   *
+   * @return Resulting {@link Query} object.
+   * @exception ParseException throw in overridden method to disallow
+   */
+  protected Query getBooleanQuery(List clauses, boolean disableCoord)
+    throws ParseException
+  {
+    if (clauses.size()==0) {
+      return null; // all clause words were filtered away by the analyzer.
+    }
+    BooleanQuery query = newBooleanQuery(disableCoord);
+    for (int i = 0; i < clauses.size(); i++) {
+      query.add((BooleanClause)clauses.get(i));
+    }
+    return query;
+  }
+
+  /**
+   * Factory method for generating a query. Called when parser
+   * parses an input term token that contains one or more wildcard
+   * characters (? and *), but is not a prefix term token (one
+   * that has just a single * character at the end)
+   *<p>
+   * Depending on settings, prefix term may be lower-cased
+   * automatically. It will not go through the default Analyzer,
+   * however, since normal Analyzers are unlikely to work properly
+   * with wildcard templates.
+   *<p>
+   * Can be overridden by extending classes, to provide custom handling for
+   * wildcard queries, which may be necessary due to missing analyzer calls.
+   *
+   * @param field Name of the field query will use.
+   * @param termStr Term token that contains one or more wild card
+   *   characters (? or *), but is not simple prefix term
+   *
+   * @return Resulting {@link Query} built for the term
+   * @exception ParseException throw in overridden method to disallow
+   */
+  protected Query getWildcardQuery(String field, String termStr) throws ParseException
+  {
+    if ("*".equals(field)) {
+      if ("*".equals(termStr)) return newMatchAllDocsQuery();
+    }
+    if (!allowLeadingWildcard && (termStr.startsWith("*") || termStr.startsWith("?")))
+      throw new ParseException("'*' or '?' not allowed as first character in WildcardQuery");
+    if (lowercaseExpandedTerms) {
+      termStr = termStr.toLowerCase();
+    }
+    Term t = new Term(field, termStr);
+    return newWildcardQuery(t);
+  }
+
+  /**
+   * Factory method for generating a query (similar to
+   * {@link #getWildcardQuery}). Called when parser parses an input term
+   * token that uses prefix notation; that is, contains a single '*' wildcard
+   * character as its last character. Since this is a special case
+   * of generic wildcard term, and such a query can be optimized easily,
+   * this usually results in a different query object.
+   *<p>
+   * Depending on settings, a prefix term may be lower-cased
+   * automatically. It will not go through the default Analyzer,
+   * however, since normal Analyzers are unlikely to work properly
+   * with wildcard templates.
+   *<p>
+   * Can be overridden by extending classes, to provide custom handling for
+   * wild card queries, which may be necessary due to missing analyzer calls.
+   *
+   * @param field Name of the field query will use.
+   * @param termStr Term token to use for building term for the query
+   *    (<b>without</b> trailing '*' character!)
+   *
+   * @return Resulting {@link Query} built for the term
+   * @exception ParseException throw in overridden method to disallow
+   */
+  protected Query getPrefixQuery(String field, String termStr) throws ParseException
+  {
+    if (!allowLeadingWildcard && termStr.startsWith("*"))
+      throw new ParseException("'*' not allowed as first character in PrefixQuery");
+    if (lowercaseExpandedTerms) {
+      termStr = termStr.toLowerCase();
+    }
+    Term t = new Term(field, termStr);
+    return newPrefixQuery(t);
+  }
+
+   /**
+   * Factory method for generating a query (similar to
+   * {@link #getWildcardQuery}). Called when parser parses
+   * an input term token that has the fuzzy suffix (~) appended.
+   *
+   * @param field Name of the field query will use.
+   * @param termStr Term token to use for building term for the query
+   *
+   * @return Resulting {@link Query} built for the term
+   * @exception ParseException throw in overridden method to disallow
+   */
+  protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException
+  {
+    if (lowercaseExpandedTerms) {
+      termStr = termStr.toLowerCase();
+    }
+    Term t = new Term(field, termStr);
+    return newFuzzyQuery(t, minSimilarity, fuzzyPrefixLength);
+  }
+
+  /**
+   * Returns a String where the escape char has been
+   * removed, or kept only once if there was a double escape.
+   * 
+   * Supports escaped unicode characters, e.g. translates
+   * <code>\\u0041</code> to <code>A</code>.
+   * 
+   */
+  private String discardEscapeChar(String input) throws ParseException {
+    // Create char array to hold unescaped char sequence
+    char[] output = new char[input.length()];
+
+    // The length of the output can be less than the input
+    // due to discarded escape chars. This variable holds
+    // the actual length of the output
+    int length = 0;
+
+    // We remember whether the last processed character was 
+    // an escape character
+    boolean lastCharWasEscapeChar = false;
+
+    // The multiplier the current unicode digit must be multiplied with.
+    // E.g. the first digit is multiplied by 16^3, the second by 16^2, ...
+    int codePointMultiplier = 0;
+
+    // Used to calculate the codepoint of the escaped unicode character
+    int codePoint = 0;
+
+    for (int i = 0; i < input.length(); i++) {
+      char curChar = input.charAt(i);
+      if (codePointMultiplier > 0) {
+        codePoint += hexToInt(curChar) * codePointMultiplier;
+        codePointMultiplier >>>= 4;
+        if (codePointMultiplier == 0) {
+          output[length++] = (char)codePoint;
+          codePoint = 0;
+        }
+      } else if (lastCharWasEscapeChar) {
+        if (curChar == 'u') {
+          // found an escaped unicode character
+          codePointMultiplier = 16 * 16 * 16;
+        } else {
+          // this character was escaped
+          output[length] = curChar;
+          length++;
+        }
+        lastCharWasEscapeChar = false;
+      } else {
+        if (curChar == '\\') {
+          lastCharWasEscapeChar = true;
+        } else {
+          output[length] = curChar;
+          length++;
+        }
+      }
+    }
+
+    if (codePointMultiplier > 0) {
+      throw new ParseException("Truncated unicode escape sequence.");
+    }
+
+    if (lastCharWasEscapeChar) {
+      throw new ParseException("Term cannot end with an escape character.");
+    }
+
+    return new String(output, 0, length);
+  }
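+
+  // Examples (illustrative):
+  //   discardEscapeChar("\\u0041bc") -> "Abc"   (escaped unicode)
+  //   discardEscapeChar("a\\-b")     -> "a-b"   (escape char dropped)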
+
+  /** Returns the numeric value of the hexadecimal character */
+  private static final int hexToInt(char c) throws ParseException {
+    if ('0' <= c && c <= '9') {
+      return c - '0';
+    } else if ('a' <= c && c <= 'f'){
+      return c - 'a' + 10;
+    } else if ('A' <= c && c <= 'F') {
+      return c - 'A' + 10;
+    } else {
+      throw new ParseException("None-hex character in unicode escape sequence: " + c);
+    }
+  }
+
+  /**
+   * Returns a String where those characters that QueryParser
+   * expects to be escaped are escaped by a preceding <code>\</code>.
+   */
+  public static String escape(String s) {
+    StringBuffer sb = new StringBuffer();
+    for (int i = 0; i < s.length(); i++) {
+      char c = s.charAt(i);
+      // These characters are part of the query syntax and must be escaped
+      if (c == '\\' || c == '+' || c == '-' || c == '!' || c == '(' || c == ')' || c == ':'
+        || c == '^' || c == '[' || c == ']' || c == '\"' || c == '{' || c == '}' || c == '~'
+        || c == '*' || c == '?' || c == '|' || c == '&') {
+        sb.append('\\');
+      }
+      sb.append(c);
+    }
+    return sb.toString();
+  }
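+  // e.g. escape("(1+1):2") returns "\(1\+1\)\:2" (illustrative)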
+
+  /**
+   * Command line tool to test QueryParser, using {@link org.apache.lucene.analysis.SimpleAnalyzer}.
+   * Usage:<br>
+   * <code>java org.apache.lucene.queryParser.QueryParser &lt;input&gt;</code>
+   */
+  public static void main(String[] args) throws Exception {
+    if (args.length == 0) {
+      System.out.println("Usage: java org.apache.lucene.queryParser.QueryParser <input>");
+      System.exit(0);
+    }
+    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field",
+                           new org.apache.lucene.analysis.SimpleAnalyzer());
+    Query q = qp.parse(args[0]);
+    System.out.println(q.toString("field"));
+  }
+}
+
+PARSER_END(QueryParser)
+
+/* ***************** */
+/* Token Definitions */
+/* ***************** */
+
+<*> TOKEN : {
+  <#_NUM_CHAR:   ["0"-"9"] >
+// every character that follows a backslash is considered an escaped character
+| <#_ESCAPED_CHAR: "\\" ~[] >
+| <#_TERM_START_CHAR: ( ~[ " ", "\t", "\n", "\r", "\u3000", "+", "-", "!", "(", ")", ":", "^",
+                           "[", "]", "\"", "{", "}", "~", "*", "?", "\\" ]
+                       | <_ESCAPED_CHAR> ) >
+| <#_TERM_CHAR: ( <_TERM_START_CHAR> | <_ESCAPED_CHAR> | "-" | "+" ) >
+| <#_WHITESPACE: ( " " | "\t" | "\n" | "\r" | "\u3000") >
+| <#_QUOTED_CHAR: ( ~[ "\"", "\\" ] | <_ESCAPED_CHAR> ) >
+}
+
+<DEFAULT, RangeIn, RangeEx> SKIP : {
+  < <_WHITESPACE>>
+}
+
+<DEFAULT> TOKEN : {
+  <AND:       ("AND" | "&&") >
+| <OR:        ("OR" | "||") >
+| <NOT:       ("NOT" | "!") >
+| <PLUS:      "+" >
+| <MINUS:     "-" >
+| <LPAREN:    "(" >
+| <RPAREN:    ")" >
+| <COLON:     ":" >
+| <STAR:      "*" >
+| <CARAT:     "^" > : Boost
+| <QUOTED:     "\"" (<_QUOTED_CHAR>)* "\"">
+| <TERM:      <_TERM_START_CHAR> (<_TERM_CHAR>)*  >
+| <FUZZY_SLOP:     "~" ( (<_NUM_CHAR>)+ ( "." (<_NUM_CHAR>)+ )? )? >
+| <PREFIXTERM:  ("*") | ( <_TERM_START_CHAR> (<_TERM_CHAR>)* "*" ) >
+| <WILDTERM:  (<_TERM_START_CHAR> | [ "*", "?" ]) (<_TERM_CHAR> | ( [ "*", "?" ] ))* >
+| <RANGEIN_START: "[" > : RangeIn
+| <RANGEEX_START: "{" > : RangeEx
+}
+
+<Boost> TOKEN : {
+<NUMBER:    (<_NUM_CHAR>)+ ( "." (<_NUM_CHAR>)+ )? > : DEFAULT
+}
+
+<RangeIn> TOKEN : {
+<RANGEIN_TO: "TO">
+| <RANGEIN_END: "]"> : DEFAULT
+| <RANGEIN_QUOTED: "\"" (~["\""] | "\\\"")+ "\"">
+| <RANGEIN_GOOP: (~[ " ", "]" ])+ >
+}
+
+<RangeEx> TOKEN : {
+<RANGEEX_TO: "TO">
+| <RANGEEX_END: "}"> : DEFAULT
+| <RANGEEX_QUOTED: "\"" (~["\""] | "\\\"")+ "\"">
+| <RANGEEX_GOOP: (~[ " ", "}" ])+ >
+}
+
+// *   Query  ::= ( Clause )*
+// *   Clause ::= ["+", "-"] [<TERM> ":"] ( <TERM> | "(" Query ")" )
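+// * e.g.  +title:lucene (parser OR query)  -- two clauses: a required term on the
+// *       "title" field and an optional parenthesised sub-query (illustrative)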
+
+int Conjunction() : {
+  int ret = CONJ_NONE;
+}
+{
+  [
+    <AND> { ret = CONJ_AND; }
+    | <OR>  { ret = CONJ_OR; }
+  ]
+  { return ret; }
+}
+
+int Modifiers() : {
+  int ret = MOD_NONE;
+}
+{
+  [
+     <PLUS> { ret = MOD_REQ; }
+     | <MINUS> { ret = MOD_NOT; }
+     | <NOT> { ret = MOD_NOT; }
+  ]
+  { return ret; }
+}
+
+// This makes sure that there is no garbage after the query string
+Query TopLevelQuery(String field) : 
+{
+       Query q;
+}
+{
+       q=Query(field) <EOF>
+       {
+               return q;
+       }
+}
+
+Query Query(String field) :
+{
+  List clauses = new ArrayList();
+  Query q, firstQuery=null;
+  int conj, mods;
+}
+{
+  mods=Modifiers() q=Clause(field)
+  {
+    addClause(clauses, CONJ_NONE, mods, q);
+    if (mods == MOD_NONE)
+        firstQuery=q;
+  }
+  (
+    conj=Conjunction() mods=Modifiers() q=Clause(field)
+    { addClause(clauses, conj, mods, q); }
+  )*
+    {
+      if (clauses.size() == 1 && firstQuery != null)
+        return firstQuery;
+      else {
+        return getBooleanQuery(clauses);
+      }
+    }
+}
+
+Query Clause(String field) : {
+  Query q;
+  Token fieldToken=null, boost=null;
+}
+{
+  [
+    LOOKAHEAD(2)
+    (
+    fieldToken=<TERM> <COLON> {field=discardEscapeChar(fieldToken.image);}
+    | <STAR> <COLON> {field="*";}
+    )
+  ]
+
+  (
+   q=Term(field)
+   | <LPAREN> q=Query(field) <RPAREN> (<CARAT> boost=<NUMBER>)?
+
+  )
+    {
+      if (boost != null) {
+        float f = (float)1.0;
+        try {
+          f = Float.valueOf(boost.image).floatValue();
+          q.setBoost(f);
+        } catch (Exception ignored) { }
+      }
+      return q;
+    }
+}
+
+
+Query Term(String field) : {
+  Token term, boost=null, fuzzySlop=null, goop1, goop2;
+  boolean prefix = false;
+  boolean wildcard = false;
+  boolean fuzzy = false;
+  Query q;
+}
+{
+  (
+     (
+       term=<TERM>
+       | term=<STAR> { wildcard=true; }
+       | term=<PREFIXTERM> { prefix=true; }
+       | term=<WILDTERM> { wildcard=true; }
+       | term=<NUMBER>
+     )
+     [ fuzzySlop=<FUZZY_SLOP> { fuzzy=true; } ]
+     [ <CARAT> boost=<NUMBER> [ fuzzySlop=<FUZZY_SLOP> { fuzzy=true; } ] ]
+     {
+       String termImage=discardEscapeChar(term.image);
+       if (wildcard) {
+         q = getWildcardQuery(field, termImage);
+       } else if (prefix) {
+         q = getPrefixQuery(field,
+             discardEscapeChar(term.image.substring(0, term.image.length()-1)));
+       } else if (fuzzy) {
+         float fms = fuzzyMinSim;
+         try {
+           fms = Float.valueOf(fuzzySlop.image.substring(1)).floatValue();
+         } catch (Exception ignored) { }
+         if (fms < 0.0f || fms > 1.0f) {
+           throw new ParseException("Minimum similarity for a FuzzyQuery has to be between 0.0f and 1.0f !");
+         }
+         q = getFuzzyQuery(field, termImage, fms);
+       } else {
+         q = getFieldQuery(field, termImage);
+       }
+     }
+     | ( <RANGEIN_START> ( goop1=<RANGEIN_GOOP>|goop1=<RANGEIN_QUOTED> )
+         [ <RANGEIN_TO> ] ( goop2=<RANGEIN_GOOP>|goop2=<RANGEIN_QUOTED> )
+         <RANGEIN_END> )
+       [ <CARAT> boost=<NUMBER> ]
+        {
+          if (goop1.kind == RANGEIN_QUOTED) {
+            goop1.image = goop1.image.substring(1, goop1.image.length()-1);
+          }
+          if (goop2.kind == RANGEIN_QUOTED) {
+            goop2.image = goop2.image.substring(1, goop2.image.length()-1);
+          }
+          q = getRangeQuery(field, discardEscapeChar(goop1.image), discardEscapeChar(goop2.image), true);
+        }
+     | ( <RANGEEX_START> ( goop1=<RANGEEX_GOOP>|goop1=<RANGEEX_QUOTED> )
+         [ <RANGEEX_TO> ] ( goop2=<RANGEEX_GOOP>|goop2=<RANGEEX_QUOTED> )
+         <RANGEEX_END> )
+       [ <CARAT> boost=<NUMBER> ]
+        {
+          if (goop1.kind == RANGEEX_QUOTED) {
+            goop1.image = goop1.image.substring(1, goop1.image.length()-1);
+          }
+          if (goop2.kind == RANGEEX_QUOTED) {
+            goop2.image = goop2.image.substring(1, goop2.image.length()-1);
+          }
+
+          q = getRangeQuery(field, discardEscapeChar(goop1.image), discardEscapeChar(goop2.image), false);
+        }
+     | term=<QUOTED>
+       [ fuzzySlop=<FUZZY_SLOP> ]
+       [ <CARAT> boost=<NUMBER> ]
+       {
+         int s = phraseSlop;
+
+         if (fuzzySlop != null) {
+           try {
+             s = Float.valueOf(fuzzySlop.image.substring(1)).intValue();
+           }
+           catch (Exception ignored) { }
+         }
+         q = getFieldQuery(field, discardEscapeChar(term.image.substring(1, term.image.length()-1)), s);
+       }
+  )
+  {
+    if (boost != null) {
+      float f = (float) 1.0;
+      try {
+        f = Float.valueOf(boost.image).floatValue();
+      }
+      catch (Exception ignored) {
+        /* Should this be handled somehow? (defaults to "no boost", if
+         * boost number is invalid)
+         */
+      }
+
+      // avoid boosting null queries, such as those caused by stop words
+      if (q != null) {
+        q.setBoost(f);
+      }
+    }
+    return q;
+  }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/QueryParser.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/QueryParser.cs
new file mode 100644 (file)
index 0000000..211bc53
--- /dev/null
@@ -0,0 +1,2363 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Generated By:JavaCC: Do not edit this line. QueryParser.java */
+
+using System;
+
+using Analyzer = Mono.Lucene.Net.Analysis.Analyzer;
+using CachingTokenFilter = Mono.Lucene.Net.Analysis.CachingTokenFilter;
+using TokenStream = Mono.Lucene.Net.Analysis.TokenStream;
+using PositionIncrementAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.PositionIncrementAttribute;
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+using DateField = Mono.Lucene.Net.Documents.DateField;
+using DateTools = Mono.Lucene.Net.Documents.DateTools;
+using Term = Mono.Lucene.Net.Index.Term;
+using Parameter = Mono.Lucene.Net.Util.Parameter;
+using BooleanClause = Mono.Lucene.Net.Search.BooleanClause;
+using BooleanQuery = Mono.Lucene.Net.Search.BooleanQuery;
+using FuzzyQuery = Mono.Lucene.Net.Search.FuzzyQuery;
+using MatchAllDocsQuery = Mono.Lucene.Net.Search.MatchAllDocsQuery;
+using MultiPhraseQuery = Mono.Lucene.Net.Search.MultiPhraseQuery;
+using MultiTermQuery = Mono.Lucene.Net.Search.MultiTermQuery;
+using PhraseQuery = Mono.Lucene.Net.Search.PhraseQuery;
+using PrefixQuery = Mono.Lucene.Net.Search.PrefixQuery;
+using Query = Mono.Lucene.Net.Search.Query;
+using TermQuery = Mono.Lucene.Net.Search.TermQuery;
+using TermRangeQuery = Mono.Lucene.Net.Search.TermRangeQuery;
+using WildcardQuery = Mono.Lucene.Net.Search.WildcardQuery;
+using Version = Mono.Lucene.Net.Util.Version;
+
+namespace Mono.Lucene.Net.QueryParsers
+{
+       
+       /// <summary> This class is generated by JavaCC.  The most important method is
+       /// {@link #Parse(String)}.
+       /// 
+       /// The syntax for query strings is as follows:
+       /// A Query is a series of clauses.
+       /// A clause may be prefixed by:
+       /// <ul>
+       /// <li> a plus (<code>+</code>) or a minus (<code>-</code>) sign, indicating
+       /// that the clause is required or prohibited respectively; or</li>
+       /// <li> a term followed by a colon, indicating the field to be searched.
+       /// This enables one to construct queries which search multiple fields.</li>
+       /// </ul>
+       /// 
+       /// A clause may be either:
+       /// <ul>
+       /// <li> a term, indicating all the documents that contain this term; or</li>
+       /// <li> a nested query, enclosed in parentheses.  Note that this may be used
+       /// with a <code>+</code>/<code>-</code> prefix to require any of a set of
+       /// terms.</li>
+       /// </ul>
+       /// 
+       /// Thus, in BNF, the query grammar is:
+       /// <pre>
+       /// Query  ::= ( Clause )*
+       /// Clause ::= ["+", "-"] [&lt;TERM&gt; ":"] ( &lt;TERM&gt; | "(" Query ")" )
+       /// </pre>
+       /// 
+       /// <p/>
+       /// Examples of appropriately formatted queries can be found in the <a
+       /// href="../../../../../../queryparsersyntax.html">query syntax
+       /// documentation</a>.
+       /// <p/>
+       /// 
+       /// <p/>
+       /// In {@link TermRangeQuery}s, QueryParser tries to detect date values, e.g.
+       /// <tt>date:[6/1/2005 TO 6/4/2005]</tt> produces a range query that searches
+       /// for "date" fields between 2005-06-01 and 2005-06-04. Note that the format
+       /// of the accepted input depends on {@link #SetLocale(Locale) the locale}.
+       /// By default a date is converted into a search term using the deprecated
+       /// {@link DateField} for compatibility reasons.
+       /// To use the new {@link DateTools} to convert dates, a
+       /// {@link Mono.Lucene.Net.Documents.DateTools.Resolution} has to be set.
+       /// <p/>
+       /// <p/>
+       /// The date resolution that shall be used for RangeQueries can be set
+       /// using {@link #SetDateResolution(DateTools.Resolution)}
+       /// or {@link #SetDateResolution(String, DateTools.Resolution)}. The former
+       /// sets the default date resolution for all fields, whereas the latter can
+       /// be used to set field specific date resolutions. Field specific date
+       /// resolutions take, if set, precedence over the default date resolution.
+       /// <p/>
+       /// <p/>
+       /// If you use neither {@link DateField} nor {@link DateTools} in your
+       /// index, you can create your own
+       /// query parser that inherits QueryParser and overwrites
+       /// {@link #GetRangeQuery(String, String, String, boolean)} to
+       /// use a different method for date conversion.
+       /// <p/>
+       /// 
+       /// <p/>Note that QueryParser is <em>not</em> thread-safe.<p/> 
+       /// 
+       /// <p/><b>NOTE</b>: there is a new QueryParser in contrib, which matches
+       /// the same syntax as this class, but is more modular,
+       /// enabling substantial customization to how a query is created.
+       /// 
+       /// <b>NOTE</b>: You must specify the required {@link Version} compatibility when
+       /// creating QueryParser:
+       /// <ul>
+       /// <li>As of 2.9, {@link #SetEnablePositionIncrements} is true by default.</li>
+       /// </ul>
+       /// </summary>
+       public class QueryParser : QueryParserConstants
+       {
+               private void  InitBlock()
+               {
+                       multiTermRewriteMethod = MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
+                       fuzzyMinSim = FuzzyQuery.defaultMinSimilarity;
+                       fuzzyPrefixLength = FuzzyQuery.defaultPrefixLength;
+                       jj_2_rtns = new JJCalls[1];
+                       jj_ls = new LookaheadSuccess();
+               }
+               
+               private const int CONJ_NONE = 0;
+               private const int CONJ_AND = 1;
+               private const int CONJ_OR = 2;
+               
+               private const int MOD_NONE = 0;
+               private const int MOD_NOT = 10;
+               private const int MOD_REQ = 11;
+               
+               // make it possible to call setDefaultOperator() without accessing 
+               // the nested class:
+               /// <summary>Alternative form of QueryParser.Operator.AND </summary>
+               public static readonly Operator AND_OPERATOR = Operator.AND;
+               /// <summary>Alternative form of QueryParser.Operator.OR </summary>
+               public static readonly Operator OR_OPERATOR = Operator.OR;
+               
+               /// <summary>The actual operator that parser uses to combine query terms </summary>
+               private Operator operator_Renamed = OR_OPERATOR;
+               
+               internal bool lowercaseExpandedTerms = true;
+               internal MultiTermQuery.RewriteMethod multiTermRewriteMethod;
+               internal bool allowLeadingWildcard = false;
+               internal bool enablePositionIncrements = true;
+               
+               internal Analyzer analyzer;
+               internal System.String field;
+               internal int phraseSlop = 0;
+               internal float fuzzyMinSim;
+               internal int fuzzyPrefixLength;
+               internal System.Globalization.CultureInfo locale = System.Threading.Thread.CurrentThread.CurrentCulture;
+               
+               // the default date resolution
+               internal DateTools.Resolution dateResolution = null;
+               // maps field names to date resolutions
+               internal System.Collections.IDictionary fieldToDateResolution = null;
+               
+               // The collator to use when determining range inclusion,
+               // for use when constructing RangeQuerys.
+               internal System.Globalization.CompareInfo rangeCollator = null;
+               
+               /// <summary>The default operator for parsing queries. 
+               /// Use {@link QueryParser#setDefaultOperator} to change it.
+               /// </summary>
+               [Serializable]
+               public sealed class Operator:Parameter
+               {
+                       internal Operator(System.String name):base(name)
+                       {
+                       }
+                       public static readonly Operator OR = new Operator("OR");
+                       public static readonly Operator AND = new Operator("AND");
+               }
+               
+               
+               /// <summary>Constructs a query parser.</summary>
+               /// <param name="f"> the default field for query terms.
+               /// </param>
+               /// <param name="a">  used to find terms in the query text.
+               /// </param>
+               /// <deprecated> Use {@link #QueryParser(Version, String, Analyzer)} instead
+               /// </deprecated>
+        [Obsolete("Use QueryParser(Version, String, Analyzer) instead")]
+               public QueryParser(System.String f, Analyzer a):this(Version.LUCENE_24, f, a)
+               {
+               }
+               
+               /// <summary> Constructs a query parser.
+               /// 
+               /// </summary>
+               /// <param name="matchVersion">Lucene version to match. See <a href="#version">above</a>)
+               /// </param>
+               /// <param name="f">the default field for query terms.
+               /// </param>
+               /// <param name="a">used to find terms in the query text.
+               /// </param>
+               public QueryParser(Version matchVersion, System.String f, Analyzer a):this(new FastCharStream(new System.IO.StringReader("")))
+               {
+                       analyzer = a;
+                       field = f;
+                       if (matchVersion.OnOrAfter(Version.LUCENE_29))
+                       {
+                               enablePositionIncrements = true;
+                       }
+                       else
+                       {
+                               enablePositionIncrements = false;
+                       }
+               }
+               
+               /// <summary>Parses a query string, returning a {@link Mono.Lucene.Net.Search.Query}.</summary>
+               /// <param name="query"> the query string to be parsed.
+               /// </param>
+               /// <throws>  ParseException if the parsing fails </throws>
+               public virtual Query Parse(System.String query)
+               {
+                       ReInit(new FastCharStream(new System.IO.StringReader(query)));
+                       try
+                       {
+                               // TopLevelQuery is a Query followed by the end-of-input (EOF)
+                               Query res = TopLevelQuery(field);
+                               return res != null?res:NewBooleanQuery(false);
+                       }
+                       catch (ParseException tme)
+                       {
+                               // rethrow to include the original query:
+                               ParseException e = new ParseException("Cannot parse '" + query + "': " + tme.Message, tme);
+                               throw e;
+                       }
+                       catch (TokenMgrError tme)
+                       {
+                               ParseException e = new ParseException("Cannot parse '" + query + "': " + tme.Message, tme);
+                               throw e;
+                       }
+                       catch (BooleanQuery.TooManyClauses tmc)
+                       {
+                               ParseException e = new ParseException("Cannot parse '" + query + "': too many boolean clauses", tmc);
+                               throw e;
+                       }
+               }
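+               // Illustrative usage of Parse (a sketch, not part of the generated parser;
+               // SimpleAnalyzer is assumed to be available in Mono.Lucene.Net.Analysis):
+               //
+               //   QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "contents",
+               //       new Mono.Lucene.Net.Analysis.SimpleAnalyzer());
+               //   Query q = parser.Parse("+title:lucene (parser OR query)");
+               //   System.Console.WriteLine(q.ToString("contents"));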
+               
+               /// <returns> Returns the analyzer.
+               /// </returns>
+               public virtual Analyzer GetAnalyzer()
+               {
+                       return analyzer;
+               }
+               
+               /// <returns> Returns the field.
+               /// </returns>
+               public virtual System.String GetField()
+               {
+                       return field;
+               }
+               
+               /// <summary> Get the minimal similarity for fuzzy queries.</summary>
+               public virtual float GetFuzzyMinSim()
+               {
+                       return fuzzyMinSim;
+               }
+               
+               /// <summary> Set the minimum similarity for fuzzy queries.
+               /// Default is 0.5f.
+               /// </summary>
+               public virtual void  SetFuzzyMinSim(float fuzzyMinSim)
+               {
+                       this.fuzzyMinSim = fuzzyMinSim;
+               }
+               
+               /// <summary> Get the prefix length for fuzzy queries. </summary>
+               /// <returns> Returns the fuzzyPrefixLength.
+               /// </returns>
+               public virtual int GetFuzzyPrefixLength()
+               {
+                       return fuzzyPrefixLength;
+               }
+               
+               /// <summary> Set the prefix length for fuzzy queries. Default is 0.</summary>
+               /// <param name="fuzzyPrefixLength">The fuzzyPrefixLength to set.
+               /// </param>
+               public virtual void  SetFuzzyPrefixLength(int fuzzyPrefixLength)
+               {
+                       this.fuzzyPrefixLength = fuzzyPrefixLength;
+               }
+               
+               /// <summary> Sets the default slop for phrases.  If zero, then exact phrase matches
+               /// are required.  Default value is zero.
+               /// </summary>
+               public virtual void  SetPhraseSlop(int phraseSlop)
+               {
+                       this.phraseSlop = phraseSlop;
+               }
+               
+               /// <summary> Gets the default slop for phrases.</summary>
+               public virtual int GetPhraseSlop()
+               {
+                       return phraseSlop;
+               }
+               
+               
+               /// <summary> Set to <code>true</code> to allow leading wildcard characters.
+               /// <p/>
+               /// When set, <code>*</code> or <code>?</code> are allowed as 
+               /// the first character of a PrefixQuery and WildcardQuery.
+               /// Note that this can produce very slow
+               /// queries on big indexes. 
+               /// <p/>
+               /// Default: false.
+               /// </summary>
+               public virtual void  SetAllowLeadingWildcard(bool allowLeadingWildcard)
+               {
+                       this.allowLeadingWildcard = allowLeadingWildcard;
+               }
+               
+               /// <seealso cref="SetAllowLeadingWildcard(boolean)">
+               /// </seealso>
+               public virtual bool GetAllowLeadingWildcard()
+               {
+                       return allowLeadingWildcard;
+               }
+               
+               /// <summary> Set to <code>true</code> to enable position increments in result query.
+               /// <p/>
+               /// When set, result phrase and multi-phrase queries will
+               /// be aware of position increments.
+               /// Useful when e.g. a StopFilter increases the position increment of
+               /// the token that follows an omitted token.
+               /// <p/>
+               /// Default: false (true when a {@link Version} of 2.9 or later is passed to the constructor).
+               /// </summary>
+               public virtual void  SetEnablePositionIncrements(bool enable)
+               {
+                       this.enablePositionIncrements = enable;
+               }
+               
+               /// <seealso cref="SetEnablePositionIncrements(boolean)">
+               /// </seealso>
+               public virtual bool GetEnablePositionIncrements()
+               {
+                       return enablePositionIncrements;
+               }
+               
+               /// <summary> Sets the boolean operator of the QueryParser.
+               /// In default mode (<code>OR_OPERATOR</code>) terms without any modifiers
+               /// are considered optional: for example <code>capital of Hungary</code> is equal to
+               /// <code>capital OR of OR Hungary</code>.<br/>
+               /// In <code>AND_OPERATOR</code> mode terms are considered to be in conjunction: the
+               /// above mentioned query is parsed as <code>capital AND of AND Hungary</code>
+               /// </summary>
+               public virtual void  SetDefaultOperator(Operator op)
+               {
+                       this.operator_Renamed = op;
+               }
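+               // For example (illustrative sketch):
+               //   parser.SetDefaultOperator(QueryParser.AND_OPERATOR);
+               //   // "capital of Hungary" now parses like "capital AND of AND Hungary",
+               //   // i.e. every bare term becomes required.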
+               
+               
+               /// <summary> Gets implicit operator setting, which will be either AND_OPERATOR
+               /// or OR_OPERATOR.
+               /// </summary>
+               public virtual Operator GetDefaultOperator()
+               {
+                       return operator_Renamed;
+               }
+               
+               
+               /// <summary> Whether terms of wildcard, prefix, fuzzy and range queries are to be automatically
+               /// lower-cased or not.  Default is <code>true</code>.
+               /// </summary>
+               public virtual void  SetLowercaseExpandedTerms(bool lowercaseExpandedTerms)
+               {
+                       this.lowercaseExpandedTerms = lowercaseExpandedTerms;
+               }
+               
+               
+               /// <seealso cref="SetLowercaseExpandedTerms(boolean)">
+               /// </seealso>
+               public virtual bool GetLowercaseExpandedTerms()
+               {
+                       return lowercaseExpandedTerms;
+               }
+               
+               /// <deprecated> Please use {@link #setMultiTermRewriteMethod} instead.
+               /// </deprecated>
+        [Obsolete("Please use SetMultiTermRewriteMethod instead.")]
+               public virtual void  SetUseOldRangeQuery(bool useOldRangeQuery)
+               {
+                       if (useOldRangeQuery)
+                       {
+                               SetMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
+                       }
+                       else
+                       {
+                               SetMultiTermRewriteMethod(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT);
+                       }
+               }
+               
+               
+               /// <deprecated> Please use {@link #getMultiTermRewriteMethod} instead.
+               /// </deprecated>
+        [Obsolete("Please use GetMultiTermRewriteMethod} instead.")]
+               public virtual bool GetUseOldRangeQuery()
+               {
+                       if (GetMultiTermRewriteMethod() == MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE)
+                       {
+                               return true;
+                       }
+                       else
+                       {
+                               return false;
+                       }
+               }
+               
+               /// <summary> By default QueryParser uses {@link MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}
+               /// when creating a PrefixQuery, WildcardQuery or RangeQuery. This implementation is generally preferable because it
+               /// (a) runs faster, (b) does not let the scarcity of terms unduly influence the score, and
+               /// (c) avoids any "TooManyBooleanClauses" exception.
+               /// However, if your application really needs to use the
+               /// old-fashioned BooleanQuery expansion rewriting and the above
+               /// points are not relevant then use this to change
+               /// the rewrite method.
+               /// </summary>
+               public virtual void  SetMultiTermRewriteMethod(MultiTermQuery.RewriteMethod method)
+               {
+                       multiTermRewriteMethod = method;
+               }
+               
+               
+               /// <seealso cref="setMultiTermRewriteMethod">
+               /// </seealso>
+               public virtual MultiTermQuery.RewriteMethod GetMultiTermRewriteMethod()
+               {
+                       return multiTermRewriteMethod;
+               }
+               
+               /// <summary> Set locale used by date range parsing.</summary>
+               public virtual void  SetLocale(System.Globalization.CultureInfo locale)
+               {
+                       this.locale = locale;
+               }
+               
+               /// <summary> Returns current locale, allowing access by subclasses.</summary>
+               public virtual System.Globalization.CultureInfo GetLocale()
+               {
+                       return locale;
+               }
+               
+               /// <summary> Sets the default date resolution used by RangeQueries for fields for which no
+               /// specific date resolution has been set. Field specific resolutions can be set
+               /// with {@link #SetDateResolution(String, DateTools.Resolution)}.
+               /// 
+               /// </summary>
+               /// <param name="dateResolution">the default date resolution to set
+               /// </param>
+               public virtual void  SetDateResolution(DateTools.Resolution dateResolution)
+               {
+                       this.dateResolution = dateResolution;
+               }
+               
+               /// <summary> Sets the date resolution used by RangeQueries for a specific field.
+               /// 
+               /// </summary>
+               /// <param name="fieldName">field for which the date resolution is to be set 
+               /// </param>
+               /// <param name="dateResolution">date resolution to set
+               /// </param>
+               public virtual void  SetDateResolution(System.String fieldName, DateTools.Resolution dateResolution)
+               {
+                       if (fieldName == null)
+                       {
+                               throw new System.ArgumentException("Field cannot be null.");
+                       }
+                       
+                       if (fieldToDateResolution == null)
+                       {
+                               // lazily initialize HashMap
+                               fieldToDateResolution = new System.Collections.Hashtable();
+                       }
+                       
+                       fieldToDateResolution[fieldName] = dateResolution;
+               }
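+               // For example (illustrative sketch):
+               //   parser.SetDateResolution(DateTools.Resolution.DAY);              // default for all fields
+               //   parser.SetDateResolution("modified", DateTools.Resolution.HOUR); // field-specific override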
+               
+               /// <summary> Returns the date resolution that is used by RangeQueries for the given field. 
+               /// Returns null, if no default or field specific date resolution has been set
+               /// for the given field.
+               /// 
+               /// </summary>
+               public virtual DateTools.Resolution GetDateResolution(System.String fieldName)
+               {
+                       if (fieldName == null)
+                       {
+                               throw new System.ArgumentException("Field cannot be null.");
+                       }
+                       
+                       if (fieldToDateResolution == null)
+                       {
+                               // no field specific date resolutions set; return default date resolution instead
+                               return this.dateResolution;
+                       }
+                       
+                       DateTools.Resolution resolution = (DateTools.Resolution) fieldToDateResolution[fieldName];
+                       if (resolution == null)
+                       {
+                               // no date resolutions set for the given field; return default date resolution instead
+                               resolution = this.dateResolution;
+                       }
+                       
+                       return resolution;
+               }
+               
+               /// <summary> Sets the collator used to determine index term inclusion in ranges
+               /// for RangeQuerys.
+               /// <p/>
+               /// <strong>WARNING:</strong> Setting the rangeCollator to a non-null
+               /// collator using this method will cause every single index Term in the
+               /// Field referenced by lowerTerm and/or upperTerm to be examined.
+               /// Depending on the number of index Terms in this Field, the operation could
+               /// be very slow.
+               /// 
+               /// </summary>
+               /// <param name="rc"> the collator to use when constructing RangeQuerys
+               /// </param>
+               public virtual void  SetRangeCollator(System.Globalization.CompareInfo rc)
+               {
+                       rangeCollator = rc;
+               }
+               
+               /// <returns> the collator used to determine index term inclusion in ranges
+               /// for RangeQuerys.
+               /// </returns>
+               public virtual System.Globalization.CompareInfo GetRangeCollator()
+               {
+                       return rangeCollator;
+               }
+               
+               /// <deprecated> use {@link #AddClause(List, int, int, Query)} instead.
+               /// </deprecated>
+        [Obsolete("use AddClause(List, int, int, Query) instead.")]
+               protected internal virtual void  AddClause(System.Collections.ArrayList clauses, int conj, int mods, Query q)
+               {
+                       AddClause((System.Collections.IList) clauses, conj, mods, q);
+               }
+               
+               protected internal virtual void  AddClause(System.Collections.IList clauses, int conj, int mods, Query q)
+               {
+                       bool required, prohibited;
+                       
+                       // If this term is introduced by AND, make the preceding term required,
+                       // unless it's already prohibited
+                       if (clauses.Count > 0 && conj == CONJ_AND)
+                       {
+                               BooleanClause c = (BooleanClause) clauses[clauses.Count - 1];
+                               if (!c.IsProhibited())
+                                       c.SetOccur(BooleanClause.Occur.MUST);
+                       }
+                       
+                       if (clauses.Count > 0 && operator_Renamed == AND_OPERATOR && conj == CONJ_OR)
+                       {
+                               // If this term is introduced by OR, make the preceding term optional,
+                               // unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
+                               // notice if the input is a OR b, first term is parsed as required; without
+                               // this modification a OR b would be parsed as +a OR b
+                               BooleanClause c = (BooleanClause) clauses[clauses.Count - 1];
+                               if (!c.IsProhibited())
+                                       c.SetOccur(BooleanClause.Occur.SHOULD);
+                       }
+                       
+                       // We might have been passed a null query; the term might have been
+                       // filtered away by the analyzer.
+                       if (q == null)
+                               return ;
+                       
+                       if (operator_Renamed == OR_OPERATOR)
+                       {
+                               // We set REQUIRED if we're introduced by AND or +; PROHIBITED if
+                               // introduced by NOT or -; make sure not to set both.
+                               prohibited = (mods == MOD_NOT);
+                               required = (mods == MOD_REQ);
+                               if (conj == CONJ_AND && !prohibited)
+                               {
+                                       required = true;
+                               }
+                       }
+                       else
+                       {
+                               // We set PROHIBITED if we're introduced by NOT or -; We set REQUIRED
+                               // if not PROHIBITED and not introduced by OR
+                               prohibited = (mods == MOD_NOT);
+                               required = (!prohibited && conj != CONJ_OR);
+                       }
+                       if (required && !prohibited)
+                               clauses.Add(NewBooleanClause(q, BooleanClause.Occur.MUST));
+                       else if (!required && !prohibited)
+                               clauses.Add(NewBooleanClause(q, BooleanClause.Occur.SHOULD));
+                       else if (!required && prohibited)
+                               clauses.Add(NewBooleanClause(q, BooleanClause.Occur.MUST_NOT));
+                       else
+                               throw new System.SystemException("Clause cannot be both required and prohibited");
+               }
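+               // Occur selection, roughly (illustrative; see the code above for the exact rules):
+               //   -term (MOD_NOT)              -> MUST_NOT
+               //   +term (MOD_REQ) in OR mode   -> MUST
+               //   bare term in OR mode         -> SHOULD (MUST when introduced by AND)
+               //   bare term in AND mode        -> MUST   (SHOULD when introduced by OR)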
+               
+               
+               /// <exception cref="ParseException">throw in overridden method to disallow
+               /// </exception>
+               public /*protected internal*/ virtual Query GetFieldQuery(System.String field, System.String queryText)
+               {
+                       // Use the analyzer to get all the tokens, and then build a TermQuery,
+                       // PhraseQuery, or nothing based on the term count
+                       
+                       TokenStream source;
+                       try
+                       {
+                               source = analyzer.ReusableTokenStream(field, new System.IO.StringReader(queryText));
+                               source.Reset();
+                       }
+                       catch (System.IO.IOException e)
+                       {
+                               source = analyzer.TokenStream(field, new System.IO.StringReader(queryText));
+                       }
+                       CachingTokenFilter buffer = new CachingTokenFilter(source);
+                       TermAttribute termAtt = null;
+                       PositionIncrementAttribute posIncrAtt = null;
+                       int numTokens = 0;
+                       
+                       bool success = false;
+                       try
+                       {
+                               buffer.Reset();
+                               success = true;
+                       }
+                       catch (System.IO.IOException e)
+                       {
+                               // success==false if we hit an exception
+                       }
+                       if (success)
+                       {
+                               if (buffer.HasAttribute(typeof(TermAttribute)))
+                               {
+                                       termAtt = (TermAttribute) buffer.GetAttribute(typeof(TermAttribute));
+                               }
+                               if (buffer.HasAttribute(typeof(PositionIncrementAttribute)))
+                               {
+                                       posIncrAtt = (PositionIncrementAttribute) buffer.GetAttribute(typeof(PositionIncrementAttribute));
+                               }
+                       }
+                       
+                       int positionCount = 0;
+                       bool severalTokensAtSamePosition = false;
+                       
+                       bool hasMoreTokens = false;
+                       if (termAtt != null)
+                       {
+                               try
+                               {
+                                       hasMoreTokens = buffer.IncrementToken();
+                                       while (hasMoreTokens)
+                                       {
+                                               numTokens++;
+                                               int positionIncrement = (posIncrAtt != null)?posIncrAtt.GetPositionIncrement():1;
+                                               if (positionIncrement != 0)
+                                               {
+                                                       positionCount += positionIncrement;
+                                               }
+                                               else
+                                               {
+                                                       severalTokensAtSamePosition = true;
+                                               }
+                                               hasMoreTokens = buffer.IncrementToken();
+                                       }
+                               }
+                               catch (System.IO.IOException e)
+                               {
+                                       // ignore
+                               }
+                       }
+                       try
+                       {
+                               // rewind the buffer stream
+                               buffer.Reset();
+                               
+                               // close original stream - all tokens buffered
+                               source.Close();
+                       }
+                       catch (System.IO.IOException e)
+                       {
+                               // ignore
+                       }
+                       
+                       if (numTokens == 0)
+                               return null;
+                       else if (numTokens == 1)
+                       {
+                               System.String term = null;
+                               try
+                               {
+                                       bool hasNext = buffer.IncrementToken();
+                                       System.Diagnostics.Debug.Assert(hasNext == true);
+                                       term = termAtt.Term();
+                               }
+                               catch (System.IO.IOException e)
+                               {
+                                       // safe to ignore, because we know the number of tokens
+                               }
+                               return NewTermQuery(new Term(field, term));
+                       }
+                       else
+                       {
+                               if (severalTokensAtSamePosition)
+                               {
+                                       if (positionCount == 1)
+                                       {
+                                               // no phrase query:
+                                               BooleanQuery q = NewBooleanQuery(true);
+                                               for (int i = 0; i < numTokens; i++)
+                                               {
+                                                       System.String term = null;
+                                                       try
+                                                       {
+                                                               bool hasNext = buffer.IncrementToken();
+                                                               System.Diagnostics.Debug.Assert(hasNext == true);
+                                                               term = termAtt.Term();
+                                                       }
+                                                       catch (System.IO.IOException e)
+                                                       {
+                                                               // safe to ignore, because we know the number of tokens
+                                                       }
+                                                       
+                                                       Query currentQuery = NewTermQuery(new Term(field, term));
+                                                       q.Add(currentQuery, BooleanClause.Occur.SHOULD);
+                                               }
+                                               return q;
+                                       }
+                                       else
+                                       {
+                                               // phrase query:
+                                               MultiPhraseQuery mpq = NewMultiPhraseQuery();
+                                               mpq.SetSlop(phraseSlop);
+                                               System.Collections.ArrayList multiTerms = new System.Collections.ArrayList();
+                                               int position = - 1;
+                                               for (int i = 0; i < numTokens; i++)
+                                               {
+                                                       System.String term = null;
+                                                       int positionIncrement = 1;
+                                                       try
+                                                       {
+                                                               bool hasNext = buffer.IncrementToken();
+                                                               System.Diagnostics.Debug.Assert(hasNext == true);
+                                                               term = termAtt.Term();
+                                                               if (posIncrAtt != null)
+                                                               {
+                                                                       positionIncrement = posIncrAtt.GetPositionIncrement();
+                                                               }
+                                                       }
+                                                       catch (System.IO.IOException e)
+                                                       {
+                                                               // safe to ignore, because we know the number of tokens
+                                                       }
+                                                       
+                                                       if (positionIncrement > 0 && multiTerms.Count > 0)
+                                                       {
+                                                               if (enablePositionIncrements)
+                                                               {
+                                    mpq.Add((Term[]) multiTerms.ToArray(typeof(Term)), position);
+                                                               }
+                                                               else
+                                                               {
+                                    mpq.Add((Term[]) multiTerms.ToArray(typeof(Term)));
+                                                               }
+                                                               multiTerms.Clear();
+                                                       }
+                                                       position += positionIncrement;
+                                                       multiTerms.Add(new Term(field, term));
+                                               }
+                                               if (enablePositionIncrements)
+                                               {
+                            mpq.Add((Term[]) multiTerms.ToArray(typeof(Term)), position);
+                                               }
+                                               else
+                                               {
+                            mpq.Add((Term[]) multiTerms.ToArray(typeof(Term)));
+                                               }
+                                               return mpq;
+                                       }
+                               }
+                               else
+                               {
+                                       PhraseQuery pq = NewPhraseQuery();
+                                       pq.SetSlop(phraseSlop);
+                                       int position = - 1;
+                                       
+                                       
+                                       for (int i = 0; i < numTokens; i++)
+                                       {
+                                               System.String term = null;
+                                               int positionIncrement = 1;
+                                               
+                                               try
+                                               {
+                                                       bool hasNext = buffer.IncrementToken();
+                                                       System.Diagnostics.Debug.Assert(hasNext == true);
+                                                       term = termAtt.Term();
+                                                       if (posIncrAtt != null)
+                                                       {
+                                                               positionIncrement = posIncrAtt.GetPositionIncrement();
+                                                       }
+                                               }
+                                               catch (System.IO.IOException e)
+                                               {
+                                                       // safe to ignore, because we know the number of tokens
+                                               }
+                                               
+                                               if (enablePositionIncrements)
+                                               {
+                                                       position += positionIncrement;
+                                                       pq.Add(new Term(field, term), position);
+                                               }
+                                               else
+                                               {
+                                                       pq.Add(new Term(field, term));
+                                               }
+                                       }
+                                       return pq;
+                               }
+                       }
+               }
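+               // Behaviour sketch (assuming an analyzer that splits on whitespace and lowercases):
+               //   GetFieldQuery("f", "Hello")        -> TermQuery for f:hello
+               //   GetFieldQuery("f", "Hello World")  -> PhraseQuery for f:"hello world"
+               //   all tokens filtered out            -> null (callers must handle a null query)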
+               
+               
+               
+               /// <summary> Base implementation delegates to {@link #GetFieldQuery(String,String)}.
+               /// This method may be overridden, for example, to return
+               /// a SpanNearQuery instead of a PhraseQuery.
+               /// 
+               /// </summary>
+               /// <exception cref="ParseException">throw in overridden method to disallow
+               /// </exception>
+               protected internal virtual Query GetFieldQuery(System.String field, System.String queryText, int slop)
+               {
+                       Query query = GetFieldQuery(field, queryText);
+                       
+                       if (query is PhraseQuery)
+                       {
+                               ((PhraseQuery) query).SetSlop(slop);
+                       }
+                       if (query is MultiPhraseQuery)
+                       {
+                               ((MultiPhraseQuery) query).SetSlop(slop);
+                       }
+                       
+                       return query;
+               }
+               
+               
+               /// <exception cref="ParseException">throw in overridden method to disallow
+               /// </exception>
+               protected internal virtual Query GetRangeQuery(System.String field, System.String part1, System.String part2, bool inclusive)
+               {
+                       if (lowercaseExpandedTerms)
+                       {
+                               part1 = part1.ToLower();
+                               part2 = part2.ToLower();
+                       }
+            try
+            {
+                System.DateTime d1;
+                System.DateTime d2;
+
+                try
+                {
+                    d1 = System.DateTime.Parse(part1, locale);
+                }
+                catch (System.Exception)
+                {
+                    d1 = System.DateTime.Parse(part1);
+                }
+                try
+                {
+                    d2 = System.DateTime.Parse(part2, locale);
+                }
+                catch (System.Exception)
+                {
+                    d2 = System.DateTime.Parse(part2);
+                }
+
+                if (inclusive)
+                {
+                    // The user can only specify the date, not the time, so make sure
+                    // the time is set to the latest possible time of that date to really
+                    // include all documents:
+                    System.Globalization.Calendar cal = new System.Globalization.GregorianCalendar();
+                    System.DateTime tempDate = d2;
+                    d2 = d2.AddHours(23 - tempDate.Hour);
+                    d2 = d2.AddMinutes(59 - tempDate.Minute);
+                    d2 = d2.AddSeconds(59 - tempDate.Second);
+                    d2 = d2.AddMilliseconds(999 - tempDate.Millisecond);
+                }
+                DateTools.Resolution resolution = GetDateResolution(field);
+                if (resolution == null)
+                {
+                    // no default or field specific date resolution has been set,
+                    // use deprecated DateField to maintain compatibility with
+                    // pre-1.9 Lucene versions.
+                    part1 = DateField.DateToString(d1);
+                    part2 = DateField.DateToString(d2);
+                }
+                else
+                {
+                    part1 = DateTools.DateToString(d1, resolution);
+                    part2 = DateTools.DateToString(d2, resolution);
+                }
+            }
+            catch (System.Exception)
+            {
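+                // not parseable as dates: fall through and use part1/part2 verbatim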
+            }
+
+            return NewRangeQuery(field, part1, part2, inclusive);
+        }
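+               // Illustrative behaviour (a sketch; actual term strings depend on locale and resolution):
+               //   GetRangeQuery("date", "1/1/2005", "1/3/2005", true)
+               //     -> the upper bound is pushed to 23:59:59.999 of that day before conversion,
+               //        so documents dated any time on the last day still match.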
+               
+               /// <summary> Builds a new BooleanQuery instance</summary>
+               /// <param name="disableCoord">disable coord
+               /// </param>
+               /// <returns> new BooleanQuery instance
+               /// </returns>
+               protected internal virtual BooleanQuery NewBooleanQuery(bool disableCoord)
+               {
+                       return new BooleanQuery(disableCoord);
+               }
+               
+               /// <summary> Builds a new BooleanClause instance</summary>
+               /// <param name="q">sub query
+               /// </param>
+               /// <param name="occur">how this clause should occur when matching documents
+               /// </param>
+               /// <returns> new BooleanClause instance
+               /// </returns>
+               protected internal virtual BooleanClause NewBooleanClause(Query q, BooleanClause.Occur occur)
+               {
+                       return new BooleanClause(q, occur);
+               }
+               
+               /// <summary> Builds a new TermQuery instance</summary>
+               /// <param name="term">term
+               /// </param>
+               /// <returns> new TermQuery instance
+               /// </returns>
+               protected internal virtual Query NewTermQuery(Term term)
+               {
+                       return new TermQuery(term);
+               }
+               
+               /// <summary> Builds a new PhraseQuery instance</summary>
+               /// <returns> new PhraseQuery instance
+               /// </returns>
+               protected internal virtual PhraseQuery NewPhraseQuery()
+               {
+                       return new PhraseQuery();
+               }
+               
+               /// <summary> Builds a new MultiPhraseQuery instance</summary>
+               /// <returns> new MultiPhraseQuery instance
+               /// </returns>
+               protected internal virtual MultiPhraseQuery NewMultiPhraseQuery()
+               {
+                       return new MultiPhraseQuery();
+               }
+               
+               /// <summary> Builds a new PrefixQuery instance</summary>
+               /// <param name="prefix">Prefix term
+               /// </param>
+               /// <returns> new PrefixQuery instance
+               /// </returns>
+               protected internal virtual Query NewPrefixQuery(Term prefix)
+               {
+                       PrefixQuery query = new PrefixQuery(prefix);
+                       query.SetRewriteMethod(multiTermRewriteMethod);
+                       return query;
+               }
+               
+               /// <summary> Builds a new FuzzyQuery instance</summary>
+               /// <param name="term">Term
+               /// </param>
+               /// <param name="minimumSimilarity">minimum similarity
+               /// </param>
+               /// <param name="prefixLength">prefix length
+               /// </param>
+               /// <returns> new FuzzyQuery Instance
+               /// </returns>
+               protected internal virtual Query NewFuzzyQuery(Term term, float minimumSimilarity, int prefixLength)
+               {
+                       // FuzzyQuery doesn't yet allow constant score rewrite
+                       return new FuzzyQuery(term, minimumSimilarity, prefixLength);
+               }
+               
+               /// <summary> Builds a new TermRangeQuery instance</summary>
+               /// <param name="field">Field
+               /// </param>
+               /// <param name="part1">min
+               /// </param>
+               /// <param name="part2">max
+               /// </param>
+               /// <param name="inclusive">true if range is inclusive
+               /// </param>
+               /// <returns> new TermRangeQuery instance
+               /// </returns>
+               protected internal virtual Query NewRangeQuery(System.String field, System.String part1, System.String part2, bool inclusive)
+               {
+                       TermRangeQuery query = new TermRangeQuery(field, part1, part2, inclusive, inclusive, rangeCollator);
+                       query.SetRewriteMethod(multiTermRewriteMethod);
+                       return query;
+               }
+               
+               /// <summary> Builds a new MatchAllDocsQuery instance</summary>
+               /// <returns> new MatchAllDocsQuery instance
+               /// </returns>
+               protected internal virtual Query NewMatchAllDocsQuery()
+               {
+                       return new MatchAllDocsQuery();
+               }
+               
+               /// <summary> Builds a new WildcardQuery instance</summary>
+               /// <param name="t">wildcard term
+               /// </param>
+               /// <returns> new WildcardQuery instance
+               /// </returns>
+               protected internal virtual Query NewWildcardQuery(Term t)
+               {
+                       WildcardQuery query = new WildcardQuery(t);
+                       query.SetRewriteMethod(multiTermRewriteMethod);
+                       return query;
+               }
+               
+               /// <summary> Factory method for generating a query, given a set of clauses.
+               /// By default creates a boolean query composed of the clauses passed in.
+               /// 
+               /// Can be overridden by extending classes to modify the query being
+               /// returned.
+               /// 
+               /// </summary>
+               /// <param name="clauses">List that contains {@link BooleanClause} instances
+               /// to join.
+               /// 
+               /// </param>
+               /// <returns> Resulting {@link Query} object.
+               /// </returns>
+               /// <exception cref="ParseException">throw in overridden method to disallow
+               /// </exception>
+               /// <deprecated> use {@link #GetBooleanQuery(List)} instead
+               /// </deprecated>
+        [Obsolete("use GetBooleanQuery(List) instead")]
+               protected internal virtual Query GetBooleanQuery(System.Collections.ArrayList clauses)
+               {
+                       return GetBooleanQuery((System.Collections.IList) clauses, false);
+               }
+               
+               /// <summary> Factory method for generating a query, given a set of clauses.
+               /// By default creates a boolean query composed of the clauses passed in.
+               /// 
+               /// Can be overridden by extending classes to modify the query being
+               /// returned.
+               /// 
+               /// </summary>
+               /// <param name="clauses">List that contains {@link BooleanClause} instances
+               /// to join.
+               /// 
+               /// </param>
+               /// <returns> Resulting {@link Query} object.
+               /// </returns>
+               /// <exception cref="ParseException">throw in overridden method to disallow
+               /// </exception>
+               protected internal virtual Query GetBooleanQuery(System.Collections.IList clauses)
+               {
+                       return GetBooleanQuery(clauses, false);
+               }
+               
+               /// <summary> Factory method for generating a query, given a set of clauses.
+               /// By default creates a boolean query composed of the clauses passed in.
+               /// 
+               /// Can be overridden by extending classes to modify the query being
+               /// returned.
+               /// 
+               /// </summary>
+               /// <param name="clauses">List that contains {@link BooleanClause} instances
+               /// to join.
+               /// </param>
+               /// <param name="disableCoord">true if coord scoring should be disabled.
+               /// 
+               /// </param>
+               /// <returns> Resulting {@link Query} object.
+               /// </returns>
+               /// <exception cref="ParseException">throw in overridden method to disallow
+               /// </exception>
+               /// <deprecated> use {@link #GetBooleanQuery(List, boolean)} instead
+               /// </deprecated>
+        [Obsolete("use GetBooleanQuery(List, bool) instead")]
+               protected internal virtual Query GetBooleanQuery(System.Collections.ArrayList clauses, bool disableCoord)
+               {
+                       return GetBooleanQuery((System.Collections.IList) clauses, disableCoord);
+               }
+               
+               /// <summary> Factory method for generating a query, given a set of clauses.
+               /// By default creates a boolean query composed of the clauses passed in.
+               /// 
+               /// Can be overridden by extending classes to modify the query being
+               /// returned.
+               /// 
+               /// </summary>
+               /// <param name="clauses">List that contains {@link BooleanClause} instances
+               /// to join.
+               /// </param>
+               /// <param name="disableCoord">true if coord scoring should be disabled.
+               /// 
+               /// </param>
+               /// <returns> Resulting {@link Query} object.
+               /// </returns>
+               /// <exception cref="ParseException">throw in overridden method to disallow
+               /// </exception>
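+               /// <example>
+               /// A minimal sketch (not part of the original API surface) of a subclass
+               /// override that always disables coord scoring:
+               /// <code>
+               /// protected internal override Query GetBooleanQuery(System.Collections.IList clauses, bool disableCoord)
+               /// {
+               ///     return base.GetBooleanQuery(clauses, true);
+               /// }
+               /// </code>
+               /// </example>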
+               protected internal virtual Query GetBooleanQuery(System.Collections.IList clauses, bool disableCoord)
+               {
+                       if (clauses.Count == 0)
+                       {
+                               return null; // all clause words were filtered away by the analyzer.
+                       }
+                       BooleanQuery query = NewBooleanQuery(disableCoord);
+                       for (int i = 0; i < clauses.Count; i++)
+                       {
+                               query.Add((BooleanClause) clauses[i]);
+                       }
+                       return query;
+               }
+               
+               /// <summary> Factory method for generating a query. Called when the parser
+               /// parses an input term token that contains one or more wildcard
+               /// characters (? and *), but is not a prefix term token (one
+               /// that has just a single * character at the end).
+               /// <p/>
+               /// Depending on settings, the wildcard term may be lower-cased
+               /// automatically. It will not go through the default Analyzer,
+               /// however, since normal Analyzers are unlikely to work properly
+               /// with wildcard templates.
+               /// <p/>
+               /// Can be overridden by extending classes to provide custom handling for
+               /// wildcard queries, which may be necessary due to missing analyzer calls.
+               /// 
+               /// </summary>
+               /// <param name="field">Name of the field query will use.
+               /// </param>
+               /// <param name="termStr">Term token that contains one or more wild card
+               /// characters (? or *), but is not simple prefix term
+               /// 
+               /// </param>
+               /// <returns> Resulting {@link Query} built for the term
+               /// </returns>
+               /// <exception cref="ParseException">throw in overridden method to disallow
+               /// </exception>
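+               /// <example>
+               /// Illustrative only: the term <c>te?t*</c> is not a simple prefix term,
+               /// so the parser routes it here; with lowercaseExpandedTerms enabled the
+               /// result is <code>NewWildcardQuery(new Term(field, "te?t*"))</code>.
+               /// </example>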
+               public /*protected internal*/ virtual Query GetWildcardQuery(System.String field, System.String termStr)
+               {
+                       if ("*".Equals(field))
+                       {
+                               if ("*".Equals(termStr))
+                                       return NewMatchAllDocsQuery();
+                       }
+                       if (!allowLeadingWildcard && (termStr.StartsWith("*") || termStr.StartsWith("?")))
+                               throw new ParseException("'*' or '?' not allowed as first character in WildcardQuery");
+                       if (lowercaseExpandedTerms)
+                       {
+                               termStr = termStr.ToLower();
+                       }
+                       Term t = new Term(field, termStr);
+                       return NewWildcardQuery(t);
+               }
+               
+               /// <summary> Factory method for generating a query (similar to
+               /// {@link #getWildcardQuery}). Called when the parser parses an input term
+               /// token that uses prefix notation; that is, contains a single '*' wildcard
+               /// character as its last character. Since this is a special case
+               /// of generic wildcard term, and such a query can be optimized easily,
+               /// this usually results in a different query object.
+               /// <p/>
+               /// Depending on settings, a prefix term may be lower-cased
+               /// automatically. It will not go through the default Analyzer,
+               /// however, since normal Analyzers are unlikely to work properly
+               /// with wildcard templates.
+               /// <p/>
+               /// Can be overridden by extending classes to provide custom handling for
+               /// wildcard queries, which may be necessary due to missing analyzer calls.
+               /// 
+               /// </summary>
+               /// <param name="field">Name of the field query will use.
+               /// </param>
+               /// <param name="termStr">Term token to use for building term for the query
+               /// (<b>without</b> trailing '*' character!)
+               /// 
+               /// </param>
+               /// <returns> Resulting {@link Query} built for the term
+               /// </returns>
+               /// <exception cref="ParseException">throw in overridden method to disallow
+               /// </exception>
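+               /// <example>
+               /// Illustrative only: for the input <c>app*</c> the parser strips the
+               /// trailing '*' and calls <code>GetPrefixQuery(field, "app")</code>,
+               /// which builds the query via NewPrefixQuery.
+               /// </example>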
+               public /*protected internal*/ virtual Query GetPrefixQuery(System.String field, System.String termStr)
+               {
+                       if (!allowLeadingWildcard && termStr.StartsWith("*"))
+                               throw new ParseException("'*' not allowed as first character in PrefixQuery");
+                       if (lowercaseExpandedTerms)
+                       {
+                               termStr = termStr.ToLower();
+                       }
+                       Term t = new Term(field, termStr);
+                       return NewPrefixQuery(t);
+               }
+               
+               /// <summary> Factory method for generating a query (similar to
+               /// {@link #getWildcardQuery}). Called when the parser parses
+               /// an input term token that has the fuzzy suffix (~) appended.
+               /// 
+               /// </summary>
+               /// <param name="field">Name of the field query will use.
+               /// </param>
+               /// <param name="termStr">Term token to use for building term for the query
+               /// 
+               /// </param>
+               /// <returns> Resulting {@link Query} built for the term
+               /// </returns>
+               /// <exception cref="ParseException">throw in overridden method to disallow
+               /// </exception>
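+               /// <example>
+               /// Illustrative only: <c>roam~0.8</c> leads to
+               /// <code>GetFuzzyQuery(field, "roam", 0.8f)</code>, while a bare
+               /// <c>roam~</c> falls back to the default fuzzyMinSim.
+               /// </example>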
+               public /*protected internal*/ virtual Query GetFuzzyQuery(System.String field, System.String termStr, float minSimilarity)
+               {
+                       if (lowercaseExpandedTerms)
+                       {
+                               termStr = termStr.ToLower();
+                       }
+                       Term t = new Term(field, termStr);
+                       return NewFuzzyQuery(t, minSimilarity, fuzzyPrefixLength);
+               }
+               
+               /// <summary> Returns a String where the escape char has been
+               /// removed, or kept only once if there was a double escape.
+               /// 
+               /// Supports escaped Unicode characters, e.g. translates
+               /// <code>\\u0041</code> to <code>A</code>.
+               /// 
+               /// </summary>
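+               /// <example>
+               /// Illustrative only: <code>\+1</code> becomes <code>+1</code>, and a
+               /// double escape <code>\\</code> is kept as a single backslash.
+               /// </example>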
+               private System.String DiscardEscapeChar(System.String input)
+               {
+                       // Create char array to hold unescaped char sequence
+                       char[] output = new char[input.Length];
+                       
+                       // The length of the output can be less than the input
+                       // due to discarded escape chars. This variable holds
+                       // the actual length of the output
+                       int length = 0;
+                       
+                       // We remember whether the last processed character was 
+                       // an escape character
+                       bool lastCharWasEscapeChar = false;
+                       
+                       // The multiplier the current unicode digit must be multiplied with.
+                       // E.g. the first digit must be multiplied by 16^3, the second by 16^2, and so on.
+                       int codePointMultiplier = 0;
+                       
+                       // Used to calculate the codepoint of the escaped unicode character
+                       int codePoint = 0;
+                       
+                       for (int i = 0; i < input.Length; i++)
+                       {
+                               char curChar = input[i];
+                               if (codePointMultiplier > 0)
+                               {
+                                       codePoint += HexToInt(curChar) * codePointMultiplier;
+                                       codePointMultiplier = SupportClass.Number.URShift(codePointMultiplier, 4);
+                                       if (codePointMultiplier == 0)
+                                       {
+                                               output[length++] = (char) codePoint;
+                                               codePoint = 0;
+                                       }
+                               }
+                               else if (lastCharWasEscapeChar)
+                               {
+                                       if (curChar == 'u')
+                                       {
+                                               // found an escaped unicode character
+                                               codePointMultiplier = 16 * 16 * 16;
+                                       }
+                                       else
+                                       {
+                                               // this character was escaped
+                                               output[length] = curChar;
+                                               length++;
+                                       }
+                                       lastCharWasEscapeChar = false;
+                               }
+                               else
+                               {
+                                       if (curChar == '\\')
+                                       {
+                                               lastCharWasEscapeChar = true;
+                                       }
+                                       else
+                                       {
+                                               output[length] = curChar;
+                                               length++;
+                                       }
+                               }
+                       }
+                       
+                       if (codePointMultiplier > 0)
+                       {
+                               throw new ParseException("Truncated unicode escape sequence.");
+                       }
+                       
+                       if (lastCharWasEscapeChar)
+                       {
+                               throw new ParseException("Term can not end with escape character.");
+                       }
+                       
+                       return new System.String(output, 0, length);
+               }
+               
+               /// <summary>Returns the numeric value of the hexadecimal character </summary>
+               private static int HexToInt(char c)
+               {
+                       if ('0' <= c && c <= '9')
+                       {
+                               return c - '0';
+                       }
+                       else if ('a' <= c && c <= 'f')
+                       {
+                               return c - 'a' + 10;
+                       }
+                       else if ('A' <= c && c <= 'F')
+                       {
+                               return c - 'A' + 10;
+                       }
+                       else
+                       {
+                               throw new ParseException("None-hex character in unicode escape sequence: " + c);
+                       }
+               }
+               
+               /// <summary> Returns a String where those characters that QueryParser
+               /// expects to be escaped are escaped by a preceding <code>\</code>.
+               /// </summary>
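+               /// <example>
+               /// Illustrative only: <code>Escape("(1+1):2")</code> returns
+               /// <code>\(1\+1\)\:2</code>.
+               /// </example>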
+               public static System.String Escape(System.String s)
+               {
+                       System.Text.StringBuilder sb = new System.Text.StringBuilder();
+                       for (int i = 0; i < s.Length; i++)
+                       {
+                               char c = s[i];
+                               // These characters are part of the query syntax and must be escaped
+                               if (c == '\\' || c == '+' || c == '-' || c == '!' || c == '(' || c == ')' || c == ':' || c == '^' || c == '[' || c == ']' || c == '\"' || c == '{' || c == '}' || c == '~' || c == '*' || c == '?' || c == '|' || c == '&')
+                               {
+                                       sb.Append('\\');
+                               }
+                               sb.Append(c);
+                       }
+                       return sb.ToString();
+               }
+               
+               /// <summary> Command line tool to test QueryParser, using {@link Mono.Lucene.Net.Analysis.SimpleAnalyzer}.
+               /// Usage:<br/>
+               /// <code>Mono.Lucene.Net.QueryParsers.QueryParser &lt;input&gt;</code>
+               /// </summary>
+               [STAThread]
+               public static void  Main(System.String[] args)
+               {
+                       if (args.Length == 0)
+                       {
+                               System.Console.Out.WriteLine("Usage: java Mono.Lucene.Net.QueryParsers.QueryParser <input>");
+                               System.Environment.Exit(0);
+                       }
+                       QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new Mono.Lucene.Net.Analysis.SimpleAnalyzer());
+                       Query q = qp.Parse(args[0]);
+                       System.Console.Out.WriteLine(q.ToString("field"));
+               }
+               
+               // *   Query  ::= ( Clause )*
+               // *   Clause ::= ["+", "-"] [<TERM> ":"] ( <TERM> | "(" Query ")" )
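+               //
+               // Example (illustrative): the input
+               //   +title:foo -bar (baz OR qux)
+               // is one Query of three Clauses: a required term on the "title" field,
+               // a prohibited term "bar", and an optional parenthesized sub-query.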
+               public int Conjunction()
+               {
+                       int ret = CONJ_NONE;
+                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                       {
+                               
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.AND: 
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.OR: 
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.AND: 
+                                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.AND);
+                                                       ret = CONJ_AND;
+                                                       break;
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.OR: 
+                                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.OR);
+                                                       ret = CONJ_OR;
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[0] = jj_gen;
+                                                       Jj_consume_token(- 1);
+                                                       throw new ParseException();
+                                               
+                                       }
+                                       break;
+                               
+                               default: 
+                                       jj_la1[1] = jj_gen;
+                                       ;
+                                       break;
+                               
+                       }
+                       {
+                               if (true)
+                                       return ret;
+                       }
+                       throw new System.ApplicationException("Missing return statement in function");
+               }
+               
+               public int Modifiers()
+               {
+                       int ret = MOD_NONE;
+                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                       {
+                               
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.NOT: 
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.PLUS: 
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.MINUS: 
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.PLUS: 
+                                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.PLUS);
+                                                       ret = MOD_REQ;
+                                                       break;
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.MINUS: 
+                                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.MINUS);
+                                                       ret = MOD_NOT;
+                                                       break;
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.NOT: 
+                                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.NOT);
+                                                       ret = MOD_NOT;
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[2] = jj_gen;
+                                                       Jj_consume_token(- 1);
+                                                       throw new ParseException();
+                                               
+                                       }
+                                       break;
+                               
+                               default: 
+                                       jj_la1[3] = jj_gen;
+                                       ;
+                                       break;
+                               
+                       }
+                       {
+                               if (true)
+                                       return ret;
+                       }
+                       throw new System.ApplicationException("Missing return statement in function");
+               }
+               
+               // This makes sure that there is no garbage after the query string
+               public Query TopLevelQuery(System.String field)
+               {
+                       Query q;
+                       q = Query(field);
+                       Jj_consume_token(0);
+                       {
+                               if (true)
+                                       return q;
+                       }
+                       throw new System.ApplicationException("Missing return statement in function");
+               }
+               
+               public Query Query(System.String field)
+               {
+                       System.Collections.IList clauses = new System.Collections.ArrayList();
+                       Query q, firstQuery = null;
+                       int conj, mods;
+                       mods = Modifiers();
+                       q = Clause(field);
+                       AddClause(clauses, CONJ_NONE, mods, q);
+                       if (mods == MOD_NONE)
+                               firstQuery = q;
+                       while (true)
+                       {
+                               switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                               {
+                                       
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.AND: 
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.OR: 
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.NOT: 
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.PLUS: 
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.MINUS: 
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.LPAREN: 
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.STAR: 
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.QUOTED: 
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.TERM: 
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.PREFIXTERM: 
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.WILDTERM: 
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_START: 
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_START: 
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.NUMBER: 
+                                               ;
+                                               break;
+                                       
+                                       default: 
+                                               jj_la1[4] = jj_gen;
+                                               goto label_1_brk;   // {{Aroush-2.9}} this goto may be misplaced
+                                       
+                               }
+                               conj = Conjunction();
+                               mods = Modifiers();
+                               q = Clause(field);
+                               AddClause(clauses, conj, mods, q);
+                       }
+
+label_1_brk: ;  // {{Aroush-2.9}} this label may be misplaced
+                       
+                       if (clauses.Count == 1 && firstQuery != null)
+                       {
+                               if (true)
+                                       return firstQuery;
+                       }
+                       else
+                       {
+                               {
+                                       if (true)
+                                               return GetBooleanQuery(clauses);
+                               }
+                       }
+                       throw new System.ApplicationException("Missing return statement in function");
+               }
+               
+               public Query Clause(System.String field)
+               {
+                       Query q;
+                       Token fieldToken = null, boost = null;
+                       if (Jj_2_1(2))
+                       {
+                               switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                               {
+                                       
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.TERM: 
+                                               fieldToken = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.TERM);
+                                               Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.COLON);
+                                               field = DiscardEscapeChar(fieldToken.image);
+                                               break;
+                                       
+                                       case Mono.Lucene.Net.QueryParsers.QueryParserConstants.STAR: 
+                                               Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.STAR);
+                                               Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.COLON);
+                                               field = "*";
+                                               break;
+                                       
+                                       default: 
+                                               jj_la1[5] = jj_gen;
+                                               Jj_consume_token(- 1);
+                                               throw new ParseException();
+                                       
+                               }
+                       }
+                       else
+                       {
+                               ;
+                       }
+                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                       {
+                               
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.STAR: 
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.QUOTED: 
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.TERM: 
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.PREFIXTERM: 
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.WILDTERM: 
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_START: 
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_START: 
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.NUMBER: 
+                                       q = Term(field);
+                                       break;
+                               
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.LPAREN: 
+                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.LPAREN);
+                                       q = Query(field);
+                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.RPAREN);
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.CARAT: 
+                                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.CARAT);
+                                                       boost = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.NUMBER);
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[6] = jj_gen;
+                                                       ;
+                                                       break;
+                                               
+                                       }
+                                       break;
+                               
+                               default: 
+                                       jj_la1[7] = jj_gen;
+                                       Jj_consume_token(- 1);
+                                       throw new ParseException();
+                               
+                       }
+                       if (boost != null)
+                       {
+                               float f = 1.0f;
+                               try
+                               {
+                                       f = (float) SupportClass.Single.Parse(boost.image);
+                                       q.SetBoost(f);
+                               }
+                               catch (System.Exception)
+                               {
+                                       // Malformed boost value; the clause keeps its default boost.
+                               }
+                       }
+                       {
+                               if (true)
+                                       return q;
+                       }
+                       throw new System.ApplicationException("Missing return statement in function");
+               }
+               
+               public Query Term(System.String field)
+               {
+                       Token term, boost = null, fuzzySlop = null, goop1, goop2;
+                       bool prefix = false;
+                       bool wildcard = false;
+                       bool fuzzy = false;
+                       Query q;
+                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                       {
+                               
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.STAR: 
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.TERM: 
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.PREFIXTERM: 
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.WILDTERM: 
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.NUMBER: 
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.TERM: 
+                                                       term = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.TERM);
+                                                       break;
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.STAR: 
+                                                       term = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.STAR);
+                                                       wildcard = true;
+                                                       break;
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.PREFIXTERM: 
+                                                       term = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.PREFIXTERM);
+                                                       prefix = true;
+                                                       break;
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.WILDTERM: 
+                                                       term = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.WILDTERM);
+                                                       wildcard = true;
+                                                       break;
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.NUMBER: 
+                                                       term = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.NUMBER);
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[8] = jj_gen;
+                                                       Jj_consume_token(- 1);
+                                                       throw new ParseException();
+                                               
+                                       }
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.FUZZY_SLOP: 
+                                                       fuzzySlop = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.FUZZY_SLOP);
+                                                       fuzzy = true;
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[9] = jj_gen;
+                                                       ;
+                                                       break;
+                                               
+                                       }
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.CARAT: 
+                                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.CARAT);
+                                                       boost = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.NUMBER);
+                                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                                       {
+                                                               
+                                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.FUZZY_SLOP: 
+                                                                       fuzzySlop = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.FUZZY_SLOP);
+                                                                       fuzzy = true;
+                                                                       break;
+                                                               
+                                                               default: 
+                                                                       jj_la1[10] = jj_gen;
+                                                                       ;
+                                                                       break;
+                                                               
+                                                       }
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[11] = jj_gen;
+                                                       ;
+                                                       break;
+                                               
+                                       }
+                                       System.String termImage = DiscardEscapeChar(term.image);
+                                       if (wildcard)
+                                       {
+                                               q = GetWildcardQuery(field, termImage);
+                                       }
+                                       else if (prefix)
+                                       {
+                                               q = GetPrefixQuery(field, DiscardEscapeChar(term.image.Substring(0, (term.image.Length - 1) - (0))));
+                                       }
+                                       else if (fuzzy)
+                                       {
+                                               float fms = fuzzyMinSim;
+                                               try
+                                               {
+                                                       fms = (float) SupportClass.Single.Parse(fuzzySlop.image.Substring(1));
+                                               }
+                                               catch (System.Exception)
+                                               {
+                                                       // Malformed similarity suffix; fall back to fuzzyMinSim.
+                                               }
+                                               if (fms < 0.0f || fms > 1.0f)
+                                               {
+                                                       {
+                                                               if (true)
+                                                                       throw new ParseException("Minimum similarity for a FuzzyQuery has to be between 0.0f and 1.0f !");
+                                                       }
+                                               }
+                                               q = GetFuzzyQuery(field, termImage, fms);
+                                       }
+                                       else
+                                       {
+                                               q = GetFieldQuery(field, termImage);
+                                       }
+                                       break;
+                               
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_START: 
+                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_START);
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_GOOP: 
+                                                       goop1 = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_GOOP);
+                                                       break;
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_QUOTED: 
+                                                       goop1 = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_QUOTED);
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[12] = jj_gen;
+                                                       Jj_consume_token(- 1);
+                                                       throw new ParseException();
+                                               
+                                       }
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_TO: 
+                                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_TO);
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[13] = jj_gen;
+                                                       ;
+                                                       break;
+                                               
+                                       }
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_GOOP: 
+                                                       goop2 = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_GOOP);
+                                                       break;
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_QUOTED: 
+                                                       goop2 = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_QUOTED);
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[14] = jj_gen;
+                                                       Jj_consume_token(- 1);
+                                                       throw new ParseException();
+                                               
+                                       }
+                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_END);
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.CARAT: 
+                                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.CARAT);
+                                                       boost = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.NUMBER);
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[15] = jj_gen;
+                                                       ;
+                                                       break;
+                                               
+                                       }
+                                       if (goop1.kind == Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_QUOTED)
+                                       {
+                                               goop1.image = goop1.image.Substring(1, (goop1.image.Length - 1) - (1));
+                                       }
+                                       if (goop2.kind == Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEIN_QUOTED)
+                                       {
+                                               goop2.image = goop2.image.Substring(1, (goop2.image.Length - 1) - (1));
+                                       }
+                                       q = GetRangeQuery(field, DiscardEscapeChar(goop1.image), DiscardEscapeChar(goop2.image), true);
+                                       break;
+                               
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_START: 
+                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_START);
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_GOOP: 
+                                                       goop1 = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_GOOP);
+                                                       break;
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_QUOTED: 
+                                                       goop1 = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_QUOTED);
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[16] = jj_gen;
+                                                       Jj_consume_token(- 1);
+                                                       throw new ParseException();
+                                               
+                                       }
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_TO: 
+                                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_TO);
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[17] = jj_gen;
+                                                       ;
+                                                       break;
+                                               
+                                       }
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_GOOP: 
+                                                       goop2 = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_GOOP);
+                                                       break;
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_QUOTED: 
+                                                       goop2 = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_QUOTED);
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[18] = jj_gen;
+                                                       Jj_consume_token(- 1);
+                                                       throw new ParseException();
+                                               
+                                       }
+                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_END);
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.CARAT: 
+                                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.CARAT);
+                                                       boost = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.NUMBER);
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[19] = jj_gen;
+                                                       break;
+                                               
+                                       }
+                                       if (goop1.kind == Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_QUOTED)
+                                       {
+                                               goop1.image = goop1.image.Substring(1, (goop1.image.Length - 1) - (1));
+                                       }
+                                       if (goop2.kind == Mono.Lucene.Net.QueryParsers.QueryParserConstants.RANGEEX_QUOTED)
+                                       {
+                                               goop2.image = goop2.image.Substring(1, (goop2.image.Length - 1) - (1));
+                                       }
+                                       
+                                       q = GetRangeQuery(field, DiscardEscapeChar(goop1.image), DiscardEscapeChar(goop2.image), false);
+                                       break;
+                               
+                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.QUOTED: 
+                                       term = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.QUOTED);
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.FUZZY_SLOP: 
+                                                       fuzzySlop = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.FUZZY_SLOP);
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[20] = jj_gen;
+                                                       break;
+                                               
+                                       }
+                                       switch ((jj_ntk == - 1)?Jj_ntk():jj_ntk)
+                                       {
+                                               
+                                               case Mono.Lucene.Net.QueryParsers.QueryParserConstants.CARAT: 
+                                                       Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.CARAT);
+                                                       boost = Jj_consume_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.NUMBER);
+                                                       break;
+                                               
+                                               default: 
+                                                       jj_la1[21] = jj_gen;
+                                                       break;
+                                               
+                                       }
+                                       int s = phraseSlop;
+                                       
+                                       if (fuzzySlop != null)
+                                       {
+                                               try
+                                               {
+                                                       s = (int) SupportClass.Single.Parse(fuzzySlop.image.Substring(1));
+                                               }
+                                               catch (System.Exception)
+                                               {
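+                                                       /* An unparsable fuzzy slop (e.g. a bare "~") is ignored; s keeps the default phraseSlop. */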
+                                               }
+                                       }
+                                       q = GetFieldQuery(field, DiscardEscapeChar(term.image.Substring(1, (term.image.Length - 1) - (1))), s);
+                                       break;
+                               
+                               default: 
+                                       jj_la1[22] = jj_gen;
+                                       Jj_consume_token(- 1);
+                                       throw new ParseException();
+                               
+                       }
+                       if (boost != null)
+                       {
+                               float f = (float) 1.0;
+                               try
+                               {
+                                       f = (float) SupportClass.Single.Parse(boost.image);
+                               }
+                               catch (System.Exception)
+                               {
+                                       /* An unparsable boost number is ignored; the query keeps
+                                       * the default boost of 1.0.
+                                       */
+                               }
+                               
+                               // avoid boosting null queries, such as those caused by stop words
+                               if (q != null)
+                               {
+                                       q.SetBoost(f);
+                               }
+                       }
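+                       // JavaCC emits the return inside "if (true)" so the "missing return" throw below stays formally reachable.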
+                       {
+                               if (true)
+                                       return q;
+                       }
+                       throw new System.ApplicationException("Missing return statement in function");
+               }
+               
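+               // Speculative lookahead: scans ahead up to xla tokens, reports whether Jj_3_1 matches, and memoizes the result via Jj_save.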
+               private bool Jj_2_1(int xla)
+               {
+                       jj_la = xla; jj_lastpos = jj_scanpos = token;
+                       try
+                       {
+                               return !Jj_3_1();
+                       }
+                       catch (LookaheadSuccess)
+                       {
+                               return true;
+                       }
+                       finally
+                       {
+                               Jj_save(0, xla);
+                       }
+               }
+               
+               private bool Jj_3R_2()
+               {
+                       if (Jj_scan_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.TERM))
+                               return true;
+                       if (Jj_scan_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.COLON))
+                               return true;
+                       return false;
+               }
+               
+               private bool Jj_3_1()
+               {
+                       Token xsp;
+                       xsp = jj_scanpos;
+                       if (Jj_3R_2())
+                       {
+                               jj_scanpos = xsp;
+                               if (Jj_3R_3())
+                                       return true;
+                       }
+                       return false;
+               }
+               
+               private bool Jj_3R_3()
+               {
+                       if (Jj_scan_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.STAR))
+                               return true;
+                       if (Jj_scan_token(Mono.Lucene.Net.QueryParsers.QueryParserConstants.COLON))
+                               return true;
+                       return false;
+               }
+               
+               /// <summary>Generated Token Manager. </summary>
+               public QueryParserTokenManager token_source;
+               /// <summary>Current token. </summary>
+               public Token token;
+               /// <summary>Next token. </summary>
+               private Token jj_nt;
+               private int jj_ntk;
+               private Token jj_scanpos, jj_lastpos;
+               private int jj_la;
+               private int jj_gen;
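+               // jj_la1[i] records the generation at which choice point i last took its default branch;
+               // jj_la1_0/jj_la1_1 hold the bit masks of token kinds expected there, read back by GenerateParseException.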
+               private int[] jj_la1 = new int[23];
+               private static int[] jj_la1_0;
+               private static int[] jj_la1_1;
+               private static void  Jj_la1_init_0()
+               {
+                       jj_la1_0 = new int[]{0x300, 0x300, 0x1c00, 0x1c00, 0x3ed3f00, 0x90000, 0x20000, 0x3ed2000, 0x2690000, 0x100000, 0x100000, 0x20000, 0x30000000, 0x4000000, 0x30000000, 0x20000, 0x0, 0x40000000, 0x0, 0x20000, 0x100000, 0x20000, 0x3ed0000};
+               }
+               private static void  Jj_la1_init_1()
+               {
+                       jj_la1_1 = new int[]{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x0, 0x3, 0x0, 0x0, 0x0, 0x0};
+               }
+               private JJCalls[] jj_2_rtns;
+               private bool jj_rescan = false;
+               private int jj_gc = 0;
+               
+               /// <summary>Constructor with user supplied CharStream. </summary>
+               protected internal QueryParser(CharStream stream)
+               {
+                       InitBlock();
+                       token_source = new QueryParserTokenManager(stream);
+                       token = new Token();
+                       jj_ntk = - 1;
+                       jj_gen = 0;
+                       for (int i = 0; i < 23; i++)
+                               jj_la1[i] = - 1;
+                       for (int i = 0; i < jj_2_rtns.Length; i++)
+                               jj_2_rtns[i] = new JJCalls();
+               }
+               
+               /// <summary>Reinitialise. </summary>
+               public virtual void  ReInit(CharStream stream)
+               {
+                       token_source.ReInit(stream);
+                       token = new Token();
+                       jj_ntk = - 1;
+                       jj_gen = 0;
+                       for (int i = 0; i < 23; i++)
+                               jj_la1[i] = - 1;
+                       for (int i = 0; i < jj_2_rtns.Length; i++)
+                               jj_2_rtns[i] = new JJCalls();
+               }
+               
+               /// <summary>Constructor with generated Token Manager. </summary>
+               protected internal QueryParser(QueryParserTokenManager tm)
+               {
+                       InitBlock();
+                       token_source = tm;
+                       token = new Token();
+                       jj_ntk = - 1;
+                       jj_gen = 0;
+                       for (int i = 0; i < 23; i++)
+                               jj_la1[i] = - 1;
+                       for (int i = 0; i < jj_2_rtns.Length; i++)
+                               jj_2_rtns[i] = new JJCalls();
+               }
+               
+               /// <summary>Reinitialise. </summary>
+               public virtual void  ReInit(QueryParserTokenManager tm)
+               {
+                       token_source = tm;
+                       token = new Token();
+                       jj_ntk = - 1;
+                       jj_gen = 0;
+                       for (int i = 0; i < 23; i++)
+                               jj_la1[i] = - 1;
+                       for (int i = 0; i < jj_2_rtns.Length; i++)
+                               jj_2_rtns[i] = new JJCalls();
+               }
+               
+               private Token Jj_consume_token(int kind)
+               {
+                       Token oldToken;
+                       if ((oldToken = token).next != null)
+                               token = token.next;
+                       else
+                               token = token.next = token_source.GetNextToken();
+                       jj_ntk = - 1;
+                       if (token.kind == kind)
+                       {
+                               jj_gen++;
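+                               // Every 100 consumed tokens, clear stale lookahead memos so the token chains they pin can be garbage-collected.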
+                               if (++jj_gc > 100)
+                               {
+                                       jj_gc = 0;
+                                       for (int i = 0; i < jj_2_rtns.Length; i++)
+                                       {
+                                               JJCalls c = jj_2_rtns[i];
+                                               while (c != null)
+                                               {
+                                                       if (c.gen < jj_gen)
+                                                               c.first = null;
+                                                       c = c.next;
+                                               }
+                                       }
+                               }
+                               return token;
+                       }
+                       token = oldToken;
+                       jj_kind = kind;
+                       throw GenerateParseException();
+               }
+               
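+               // Thrown from Jj_scan_token when a lookahead scan matches in full; Jj_2_1 catches it to report success.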
+               [Serializable]
+               private sealed class LookaheadSuccess : System.ApplicationException
+               {
+               }
+               private LookaheadSuccess jj_ls;
+               private bool Jj_scan_token(int kind)
+               {
+                       if (jj_scanpos == jj_lastpos)
+                       {
+                               jj_la--;
+                               if (jj_scanpos.next == null)
+                               {
+                                       jj_lastpos = jj_scanpos = jj_scanpos.next = token_source.GetNextToken();
+                               }
+                               else
+                               {
+                                       jj_lastpos = jj_scanpos = jj_scanpos.next;
+                               }
+                       }
+                       else
+                       {
+                               jj_scanpos = jj_scanpos.next;
+                       }
+                       if (jj_rescan)
+                       {
+                               int i = 0; Token tok = token;
+                               while (tok != null && tok != jj_scanpos)
+                               {
+                                       i++; tok = tok.next;
+                               }
+                               if (tok != null)
+                                       Jj_add_error_token(kind, i);
+                       }
+                       if (jj_scanpos.kind != kind)
+                               return true;
+                       if (jj_la == 0 && jj_scanpos == jj_lastpos)
+                               throw jj_ls;
+                       return false;
+               }
+               
+               
+               /// <summary>Get the next Token. </summary>
+               public Token GetNextToken()
+               {
+                       if (token.next != null)
+                               token = token.next;
+                       else
+                               token = token.next = token_source.GetNextToken();
+                       jj_ntk = - 1;
+                       jj_gen++;
+                       return token;
+               }
+               
+               /// <summary>Get the specific Token. </summary>
+               public Token GetToken(int index)
+               {
+                       Token t = token;
+                       for (int i = 0; i < index; i++)
+                       {
+                               if (t.next != null)
+                                       t = t.next;
+                               else
+                                       t = t.next = token_source.GetNextToken();
+                       }
+                       return t;
+               }
+               
+               private int Jj_ntk()
+               {
+                       if ((jj_nt = token.next) == null)
+                               return (jj_ntk = (token.next = token_source.GetNextToken()).kind);
+                       else
+                               return (jj_ntk = jj_nt.kind);
+               }
+               
+               private System.Collections.IList jj_expentries = new System.Collections.ArrayList();
+               private int[] jj_expentry;
+               private int jj_kind = - 1;
+               private int[] jj_lasttokens = new int[100];
+               private int jj_endpos;
+               
+               private void  Jj_add_error_token(int kind, int pos)
+               {
+                       if (pos >= 100)
+                               return ;
+                       if (pos == jj_endpos + 1)
+                       {
+                               jj_lasttokens[jj_endpos++] = kind;
+                       }
+                       else if (jj_endpos != 0)
+                       {
+                               jj_expentry = new int[jj_endpos];
+                               for (int i = 0; i < jj_endpos; i++)
+                               {
+                                       jj_expentry[i] = jj_lasttokens[i];
+                               }
+                               if (pos != 0)
+                                       jj_lasttokens[(jj_endpos = pos) - 1] = kind;
+                       }
+               }
+               
+               /// <summary>Generate ParseException. </summary>
+               public virtual ParseException GenerateParseException()
+               {
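+                       // Mark every token kind that was acceptable at the failure point, from jj_kind and the jj_la1 masks.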
+                       jj_expentries.Clear();
+                       bool[] la1tokens = new bool[34];
+                       if (jj_kind >= 0)
+                       {
+                               la1tokens[jj_kind] = true;
+                               jj_kind = - 1;
+                       }
+                       for (int i = 0; i < 23; i++)
+                       {
+                               if (jj_la1[i] == jj_gen)
+                               {
+                                       for (int j = 0; j < 32; j++)
+                                       {
+                                               if ((jj_la1_0[i] & (1 << j)) != 0)
+                                               {
+                                                       la1tokens[j] = true;
+                                               }
+                                               if ((jj_la1_1[i] & (1 << j)) != 0)
+                                               {
+                                                       la1tokens[32 + j] = true;
+                                               }
+                                       }
+                               }
+                       }
+                       for (int i = 0; i < 34; i++)
+                       {
+                               if (la1tokens[i])
+                               {
+                                       jj_expentry = new int[1];
+                                       jj_expentry[0] = i;
+                                       jj_expentries.Add(jj_expentry);
+                               }
+                       }
+                       jj_endpos = 0;
+                       Jj_rescan_token();
+                       Jj_add_error_token(0, 0);
+                       int[][] exptokseq = new int[jj_expentries.Count][];
+                       for (int i = 0; i < jj_expentries.Count; i++)
+                       {
+                               exptokseq[i] = (int[]) jj_expentries[i];
+                       }
+                       return new ParseException(token, exptokseq, Mono.Lucene.Net.QueryParsers.QueryParserConstants.tokenImage);
+               }
+               
+               /// <summary>Enable tracing. </summary>
+               public void  Enable_tracing()
+               {
+               }
+               
+               /// <summary>Disable tracing. </summary>
+               public void  Disable_tracing()
+               {
+               }
+               
+               private void  Jj_rescan_token()
+               {
+                       jj_rescan = true;
+                       for (int i = 0; i < 1; i++)
+                       {
+                               try
+                               {
+                                       JJCalls p = jj_2_rtns[i];
+                                       do 
+                                       {
+                                               if (p.gen > jj_gen)
+                                               {
+                                                       jj_la = p.arg; jj_lastpos = jj_scanpos = p.first;
+                                                       switch (i)
+                                                       {
+                                                               case 0:  Jj_3_1(); break;
+                                                       }
+                                               }
+                                               p = p.next;
+                                       }
+                                       while (p != null);
+                               }
+                               catch (LookaheadSuccess)
+                               {
+                               }
+                       }
+                       jj_rescan = false;
+               }
+               
+               private void  Jj_save(int index, int xla)
+               {
+                       JJCalls p = jj_2_rtns[index];
+                       while (p.gen > jj_gen)
+                       {
+                               if (p.next == null)
+                               {
+                                       p = p.next = new JJCalls(); break;
+                               }
+                               p = p.next;
+                       }
+                       p.gen = jj_gen + xla - jj_la; p.first = token; p.arg = xla;
+               }
+               
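+               // Memo for one Jj_2_* lookahead call: gen is the generation through which it stays valid, first the token it started from, arg its lookahead bound.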
+               internal sealed class JJCalls
+               {
+                       internal int gen;
+                       internal Token first;
+                       internal int arg;
+                       internal JJCalls next;
+               }
+               static QueryParser()
+               {
+                       Jj_la1_init_0();
+                       Jj_la1_init_1();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/QueryParserConstants.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/QueryParserConstants.cs
new file mode 100644 (file)
index 0000000..9503884
--- /dev/null
@@ -0,0 +1,143 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Generated By:JavaCC: Do not edit this line. QueryParserConstants.java */
+
+using System;
+
+namespace Mono.Lucene.Net.QueryParsers
+{
+       
+       
+       /// <summary> Token literal values and constants.
+       /// Generated by org.javacc.parser.OtherFilesGen#start()
+       /// </summary>
+       public class QueryParserConstants
+       {
+               /// <summary>End of File. </summary>
+               public const int EOF = 0;
+               /// <summary>RegularExpression Id. </summary>
+               public const int _NUM_CHAR = 1;
+               /// <summary>RegularExpression Id. </summary>
+               public const int _ESCAPED_CHAR = 2;
+               /// <summary>RegularExpression Id. </summary>
+               public const int _TERM_START_CHAR = 3;
+               /// <summary>RegularExpression Id. </summary>
+               public const int _TERM_CHAR = 4;
+               /// <summary>RegularExpression Id. </summary>
+               public const int _WHITESPACE = 5;
+               /// <summary>RegularExpression Id. </summary>
+               public const int _QUOTED_CHAR = 6;
+               /// <summary>RegularExpression Id. </summary>
+               public const int AND = 8;
+               /// <summary>RegularExpression Id. </summary>
+               public const int OR = 9;
+               /// <summary>RegularExpression Id. </summary>
+               public const int NOT = 10;
+               /// <summary>RegularExpression Id. </summary>
+               public const int PLUS = 11;
+               /// <summary>RegularExpression Id. </summary>
+               public const int MINUS = 12;
+               /// <summary>RegularExpression Id. </summary>
+               public const int LPAREN = 13;
+               /// <summary>RegularExpression Id. </summary>
+               public const int RPAREN = 14;
+               /// <summary>RegularExpression Id. </summary>
+               public const int COLON = 15;
+               /// <summary>RegularExpression Id. </summary>
+               public const int STAR = 16;
+               /// <summary>RegularExpression Id. </summary>
+               public const int CARAT = 17;
+               /// <summary>RegularExpression Id. </summary>
+               public const int QUOTED = 18;
+               /// <summary>RegularExpression Id. </summary>
+               public const int TERM = 19;
+               /// <summary>RegularExpression Id. </summary>
+               public const int FUZZY_SLOP = 20;
+               /// <summary>RegularExpression Id. </summary>
+               public const int PREFIXTERM = 21;
+               /// <summary>RegularExpression Id. </summary>
+               public const int WILDTERM = 22;
+               /// <summary>RegularExpression Id. </summary>
+               public const int RANGEIN_START = 23;
+               /// <summary>RegularExpression Id. </summary>
+               public const int RANGEEX_START = 24;
+               /// <summary>RegularExpression Id. </summary>
+               public const int NUMBER = 25;
+               /// <summary>RegularExpression Id. </summary>
+               public const int RANGEIN_TO = 26;
+               /// <summary>RegularExpression Id. </summary>
+               public const int RANGEIN_END = 27;
+               /// <summary>RegularExpression Id. </summary>
+               public const int RANGEIN_QUOTED = 28;
+               /// <summary>RegularExpression Id. </summary>
+               public const int RANGEIN_GOOP = 29;
+               /// <summary>RegularExpression Id. </summary>
+               public const int RANGEEX_TO = 30;
+               /// <summary>RegularExpression Id. </summary>
+               public const int RANGEEX_END = 31;
+               /// <summary>RegularExpression Id. </summary>
+               public const int RANGEEX_QUOTED = 32;
+               /// <summary>RegularExpression Id. </summary>
+               public const int RANGEEX_GOOP = 33;
+               /// <summary>Lexical state. </summary>
+               public const int Boost = 0;
+               /// <summary>Lexical state. </summary>
+               public const int RangeEx = 1;
+               /// <summary>Lexical state. </summary>
+               public const int RangeIn = 2;
+               /// <summary>Lexical state. </summary>
+               public const int DEFAULT = 3;
+               /// <summary>Literal token values. </summary>
+               public static System.String[] tokenImage = new System.String[] {
+            "<EOF>", 
+            "<_NUM_CHAR>", 
+            "<_ESCAPED_CHAR>", 
+            "<_TERM_START_CHAR>", 
+            "<_TERM_CHAR>", 
+            "<_WHITESPACE>", 
+            "<_QUOTED_CHAR>", 
+            "<token of kind 7>", 
+            "<AND>", 
+            "<OR>", 
+            "<NOT>", 
+            "\"+\"", 
+            "\"-\"", 
+            "\"(\"", 
+            "\")\"", 
+            "\":\"", 
+            "\"*\"", 
+            "\"^\"", 
+            "<QUOTED>", 
+            "<TERM>", 
+            "<FUZZY_SLOP>", 
+            "<PREFIXTERM>", 
+            "<WILDTERM>", 
+            "\"[\"", 
+            "\"{\"", 
+            "<NUMBER>", 
+            "\"TO\"", 
+            "\"]\"", 
+            "<RANGEIN_QUOTED>", 
+            "<RANGEIN_GOOP>", 
+            "\"TO\"", 
+            "\"}\"", 
+            "<RANGEEX_QUOTED>", 
+            "<RANGEEX_GOOP>"
+        };
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/QueryParserTokenManager.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/QueryParserTokenManager.cs
new file mode 100644 (file)
index 0000000..3f7d125
--- /dev/null
@@ -0,0 +1,1486 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Generated By:JavaCC: Do not edit this line. QueryParserTokenManager.java */
+
+using System;
+
+using Analyzer = Mono.Lucene.Net.Analysis.Analyzer;
+using CachingTokenFilter = Mono.Lucene.Net.Analysis.CachingTokenFilter;
+using TokenStream = Mono.Lucene.Net.Analysis.TokenStream;
+using PositionIncrementAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.PositionIncrementAttribute;
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+using DateField = Mono.Lucene.Net.Documents.DateField;
+using DateTools = Mono.Lucene.Net.Documents.DateTools;
+using Term = Mono.Lucene.Net.Index.Term;
+using Parameter = Mono.Lucene.Net.Util.Parameter;
+using BooleanClause = Mono.Lucene.Net.Search.BooleanClause;
+using BooleanQuery = Mono.Lucene.Net.Search.BooleanQuery;
+using FuzzyQuery = Mono.Lucene.Net.Search.FuzzyQuery;
+using MatchAllDocsQuery = Mono.Lucene.Net.Search.MatchAllDocsQuery;
+using MultiPhraseQuery = Mono.Lucene.Net.Search.MultiPhraseQuery;
+using MultiTermQuery = Mono.Lucene.Net.Search.MultiTermQuery;
+using PhraseQuery = Mono.Lucene.Net.Search.PhraseQuery;
+using PrefixQuery = Mono.Lucene.Net.Search.PrefixQuery;
+using Query = Mono.Lucene.Net.Search.Query;
+using TermQuery = Mono.Lucene.Net.Search.TermQuery;
+using TermRangeQuery = Mono.Lucene.Net.Search.TermRangeQuery;
+using WildcardQuery = Mono.Lucene.Net.Search.WildcardQuery;
+using Version = Mono.Lucene.Net.Util.Version;
+
+namespace Mono.Lucene.Net.QueryParsers
+{
+       
+       /// <summary>Token Manager. </summary>
+       public class QueryParserTokenManager : QueryParserConstants
+       {
+               private void  InitBlock()
+               {
+                       System.IO.StreamWriter temp_writer;
+                       temp_writer = new System.IO.StreamWriter(System.Console.OpenStandardOutput(), System.Console.Out.Encoding);
+                       temp_writer.AutoFlush = true;
+                       debugStream = temp_writer;
+               }
+               
+               /// <summary>Debug output. </summary>
+               public System.IO.StreamWriter debugStream;
+               /// <summary>Set debug output. </summary>
+               public virtual void  SetDebugStream(System.IO.StreamWriter ds)
+               {
+                       debugStream = ds;
+               }
+               private int JjStopStringLiteralDfa_3(int pos, long active0)
+               {
+                       switch (pos)
+                       {
+                               
+                               default: 
+                                       return - 1;
+                               
+                       }
+               }
+               private int JjStartNfa_3(int pos, long active0)
+               {
+                       return JjMoveNfa_3(JjStopStringLiteralDfa_3(pos, active0), pos + 1);
+               }
+               private int JjStopAtPos(int pos, int kind)
+               {
+                       jjmatchedKind = kind;
+                       jjmatchedPos = pos;
+                       return pos + 1;
+               }
+               private int JjMoveStringLiteralDfa0_3()
+               {
+                       switch (curChar)
+                       {
+                               
+                               case (char) (40): 
+                                       return JjStopAtPos(0, 13);
+                               
+                               case (char) (41): 
+                                       return JjStopAtPos(0, 14);
+                               
+                               case (char) (42): 
+                                       return JjStartNfaWithStates_3(0, 16, 36);
+                               
+                               case (char) (43): 
+                                       return JjStopAtPos(0, 11);
+                               
+                               case (char) (45): 
+                                       return JjStopAtPos(0, 12);
+                               
+                               case (char) (58): 
+                                       return JjStopAtPos(0, 15);
+                               
+                               case (char) (91): 
+                                       return JjStopAtPos(0, 23);
+                               
+                               case (char) (94): 
+                                       return JjStopAtPos(0, 17);
+                               
+                               case (char) (123): 
+                                       return JjStopAtPos(0, 24);
+                               
+                               default: 
+                                       return JjMoveNfa_3(0, 0);
+                               
+                       }
+               }
+               private int JjStartNfaWithStates_3(int pos, int kind, int state)
+               {
+                       jjmatchedKind = kind;
+                       jjmatchedPos = pos;
+                       try
+                       {
+                               curChar = input_stream.ReadChar();
+                       }
+                       catch (System.IO.IOException e)
+                       {
+                               return pos + 1;
+                       }
+                       return JjMoveNfa_3(state, pos + 1);
+               }
+               internal static readonly ulong[] jjbitVec0 = new ulong[]{0x1L, 0x0L, 0x0L, 0x0L};
+               internal static readonly ulong[] jjbitVec1 = new ulong[]{0xfffffffffffffffeL, 0xffffffffffffffffL, 0xffffffffffffffffL, 0xffffffffffffffffL};
+               internal static readonly ulong[] jjbitVec3 = new ulong[]{0x0L, 0x0L, 0xffffffffffffffffL, 0xffffffffffffffffL};
+               internal static readonly ulong[] jjbitVec4 = new ulong[]{0xfffefffffffffffeL, 0xffffffffffffffffL, 0xffffffffffffffffL, 0xffffffffffffffffL};
+               private int JjMoveNfa_3(int startState, int curPos)
+               {
+                       int startsAt = 0;
+                       jjnewStateCnt = 36;
+                       int i = 1;
+                       jjstateSet[0] = startState;
+                       int kind = 0x7fffffff;
+                       for (; ; )
+                       {
+                               if (++jjround == 0x7fffffff)
+                                       ReInitRounds();
+                               if (curChar < 64)
+                               {
+                                       ulong l = (ulong) (1L << (int) curChar);
+                                       do 
+                                       {
+                                               switch (jjstateSet[--i])
+                                               {
+                                                       
+                                                       case 36: 
+                                                       case 25: 
+                                                               if ((0xfbfffcf8ffffd9ffL & l) == (ulong) 0L)
+                                                                       break;
+                                                               if (kind > 22)
+                                                                       kind = 22;
+                                                               JjCheckNAddTwoStates(25, 26);
+                                                               break;
+                                                       
+                                                       case 0: 
+                                                               if ((0xfbffd4f8ffffd9ffL & l) != (ulong) 0L)
+                                                               {
+                                                                       if (kind > 22)
+                                                                               kind = 22;
+                                                                       JjCheckNAddTwoStates(25, 26);
+                                                               }
+                                                               else if ((0x100002600L & l) != 0L)
+                                                               {
+                                                                       if (kind > 7)
+                                                                               kind = 7;
+                                                               }
+                                                               else if (curChar == 34)
+                                                                       JjCheckNAddStates(0, 2);
+                                                               else if (curChar == 33)
+                                                               {
+                                                                       if (kind > 10)
+                                                                               kind = 10;
+                                                               }
+                                                               if ((0x7bffd0f8ffffd9ffL & l) != 0L)
+                                                               {
+                                                                       if (kind > 19)
+                                                                               kind = 19;
+                                                                       JjCheckNAddStates(3, 7);
+                                                               }
+                                                               else if (curChar == 42)
+                                                               {
+                                                                       if (kind > 21)
+                                                                               kind = 21;
+                                                               }
+                                                               if (curChar == 38)
+                                                                       jjstateSet[jjnewStateCnt++] = 4;
+                                                               break;
+                                                       
+                                                       case 4: 
+                                                               if (curChar == 38 && kind > 8)
+                                                                       kind = 8;
+                                                               break;
+                                                       
+                                                       case 5: 
+                                                               if (curChar == 38)
+                                                                       jjstateSet[jjnewStateCnt++] = 4;
+                                                               break;
+                                                       
+                                                       case 13: 
+                                                               if (curChar == 33 && kind > 10)
+                                                                       kind = 10;
+                                                               break;
+                                                       
+                                                       case 14: 
+                                                               if (curChar == 34)
+                                                                       JjCheckNAddStates(0, 2);
+                                                               break;
+                                                       
+                                                       case 15: 
+                                                               if ((0xfffffffbffffffffL & l) != (ulong) 0L)
+                                                                       JjCheckNAddStates(0, 2);
+                                                               break;
+                                                       
+                                                       case 17: 
+                                                               JjCheckNAddStates(0, 2);
+                                                               break;
+                                                       
+                                                       case 18: 
+                                                               if (curChar == 34 && kind > 18)
+                                                                       kind = 18;
+                                                               break;
+                                                       
+                                                       case 20: 
+                                                               if ((0x3ff000000000000L & l) == 0L)
+                                                                       break;
+                                                               if (kind > 20)
+                                                                       kind = 20;
+                                                               JjAddStates(8, 9);
+                                                               break;
+                                                       
+                                                       case 21: 
+                                                               if (curChar == 46)
+                                                                       JjCheckNAdd(22);
+                                                               break;
+                                                       
+                                                       case 22: 
+                                                               if ((0x3ff000000000000L & l) == 0L)
+                                                                       break;
+                                                               if (kind > 20)
+                                                                       kind = 20;
+                                                               JjCheckNAdd(22);
+                                                               break;
+                                                       
+                                                       case 23: 
+                                                               if (curChar == 42 && kind > 21)
+                                                                       kind = 21;
+                                                               break;
+                                                       
+                                                       case 24: 
+                                                               if ((0xfbffd4f8ffffd9ffL & l) == (ulong) 0L)
+                                                                       break;
+                                                               if (kind > 22)
+                                                                       kind = 22;
+                                                               JjCheckNAddTwoStates(25, 26);
+                                                               break;
+                                                       
+                                                       case 27: 
+                                                               if (kind > 22)
+                                                                       kind = 22;
+                                                               JjCheckNAddTwoStates(25, 26);
+                                                               break;
+                                                       
+                                                       case 28: 
+                                                               if ((0x7bffd0f8ffffd9ffL & l) == 0L)
+                                                                       break;
+                                                               if (kind > 19)
+                                                                       kind = 19;
+                                                               JjCheckNAddStates(3, 7);
+                                                               break;
+                                                       
+                                                       case 29: 
+                                                               if ((0x7bfff8f8ffffd9ffL & l) == 0L)
+                                                                       break;
+                                                               if (kind > 19)
+                                                                       kind = 19;
+                                                               JjCheckNAddTwoStates(29, 30);
+                                                               break;
+                                                       
+                                                       case 31: 
+                                                               if (kind > 19)
+                                                                       kind = 19;
+                                                               JjCheckNAddTwoStates(29, 30);
+                                                               break;
+                                                       
+                                                       case 32: 
+                                                               if ((0x7bfff8f8ffffd9ffL & l) != 0L)
+                                                                       JjCheckNAddStates(10, 12);
+                                                               break;
+                                                       
+                                                       case 34: 
+                                                               JjCheckNAddStates(10, 12);
+                                                               break;
+                                                       
+                                                       default:  break;
+                                                       
+                                               }
+                                       }
+                                       while (i != startsAt);
+                               }
+                               else if (curChar < 128)
+                               {
+                                       ulong l = (ulong) (1L << (curChar & 63));
+                                       do 
+                                       {
+                                               switch (jjstateSet[--i])
+                                               {
+                                                       
+                                                       case 36: 
+                                                               if ((0x97ffffff87ffffffL & l) != (ulong) 0L)
+                                                               {
+                                                                       if (kind > 22)
+                                                                               kind = 22;
+                                                                       JjCheckNAddTwoStates(25, 26);
+                                                               }
+                                                               else if (curChar == 92)
+                                                                       JjCheckNAddTwoStates(27, 27);
+                                                               break;
+                                                       
+                                                       case 0: 
+                                                               if ((0x97ffffff87ffffffL & l) != (ulong) 0L)
+                                                               {
+                                                                       if (kind > 19)
+                                                                               kind = 19;
+                                                                       JjCheckNAddStates(3, 7);
+                                                               }
+                                                               else if (curChar == 92)
+                                                                       JjCheckNAddStates(13, 15);
+                                                               else if (curChar == 126)
+                                                               {
+                                                                       if (kind > 20)
+                                                                               kind = 20;
+                                                                       jjstateSet[jjnewStateCnt++] = 20;
+                                                               }
+                                                               if ((0x97ffffff87ffffffL & l) != (ulong) 0L)
+                                                               {
+                                                                       if (kind > 22)
+                                                                               kind = 22;
+                                                                       JjCheckNAddTwoStates(25, 26);
+                                                               }
+                                                               if (curChar == 78)
+                                                                       jjstateSet[jjnewStateCnt++] = 11;
+                                                               else if (curChar == 124)
+                                                                       jjstateSet[jjnewStateCnt++] = 8;
+                                                               else if (curChar == 79)
+                                                                       jjstateSet[jjnewStateCnt++] = 6;
+                                                               else if (curChar == 65)
+                                                                       jjstateSet[jjnewStateCnt++] = 2;
+                                                               break;
+                                                       
+                                                       case 1: 
+                                                               if (curChar == 68 && kind > 8)
+                                                                       kind = 8;
+                                                               break;
+                                                       
+                                                       case 2: 
+                                                               if (curChar == 78)
+                                                                       jjstateSet[jjnewStateCnt++] = 1;
+                                                               break;
+                                                       
+                                                       case 3: 
+                                                               if (curChar == 65)
+                                                                       jjstateSet[jjnewStateCnt++] = 2;
+                                                               break;
+                                                       
+                                                       case 6: 
+                                                               if (curChar == 82 && kind > 9)
+                                                                       kind = 9;
+                                                               break;
+                                                       
+                                                       case 7: 
+                                                               if (curChar == 79)
+                                                                       jjstateSet[jjnewStateCnt++] = 6;
+                                                               break;
+                                                       
+                                                       case 8: 
+                                                               if (curChar == 124 && kind > 9)
+                                                                       kind = 9;
+                                                               break;
+                                                       
+                                                       case 9: 
+                                                               if (curChar == 124)
+                                                                       jjstateSet[jjnewStateCnt++] = 8;
+                                                               break;
+                                                       
+                                                       case 10: 
+                                                               if (curChar == 84 && kind > 10)
+                                                                       kind = 10;
+                                                               break;
+                                                       
+                                                       case 11: 
+                                                               if (curChar == 79)
+                                                                       jjstateSet[jjnewStateCnt++] = 10;
+                                                               break;
+                                                       
+                                                       case 12: 
+                                                               if (curChar == 78)
+                                                                       jjstateSet[jjnewStateCnt++] = 11;
+                                                               break;
+                                                       
+                                                       case 15: 
+                                                               if ((0xffffffffefffffffL & l) != (ulong) 0L)
+                                                                       JjCheckNAddStates(0, 2);
+                                                               break;
+                                                       
+                                                       case 16: 
+                                                               if (curChar == 92)
+                                                                       jjstateSet[jjnewStateCnt++] = 17;
+                                                               break;
+                                                       
+                                                       case 17: 
+                                                               JjCheckNAddStates(0, 2);
+                                                               break;
+                                                       
+                                                       case 19: 
+                                                               if (curChar != 126)
+                                                                       break;
+                                                               if (kind > 20)
+                                                                       kind = 20;
+                                                               jjstateSet[jjnewStateCnt++] = 20;
+                                                               break;
+                                                       
+                                                       case 24: 
+                                                               if ((0x97ffffff87ffffffL & l) == (ulong) 0L)
+                                                                       break;
+                                                               if (kind > 22)
+                                                                       kind = 22;
+                                                               JjCheckNAddTwoStates(25, 26);
+                                                               break;
+                                                       
+                                                       case 25: 
+                                                               if ((0x97ffffff87ffffffL & l) == (ulong) 0L)
+                                                                       break;
+                                                               if (kind > 22)
+                                                                       kind = 22;
+                                                               JjCheckNAddTwoStates(25, 26);
+                                                               break;
+                                                       
+                                                       case 26: 
+                                                               if (curChar == 92)
+                                                                       JjCheckNAddTwoStates(27, 27);
+                                                               break;
+                                                       
+                                                       case 27: 
+                                                               if (kind > 22)
+                                                                       kind = 22;
+                                                               JjCheckNAddTwoStates(25, 26);
+                                                               break;
+                                                       
+                                                       case 28: 
+                                                               if ((0x97ffffff87ffffffL & l) == (ulong) 0L)
+                                                                       break;
+                                                               if (kind > 19)
+                                                                       kind = 19;
+                                                               JjCheckNAddStates(3, 7);
+                                                               break;
+                                                       
+                                                       case 29: 
+                                                               if ((0x97ffffff87ffffffL & l) == (ulong) 0L)
+                                                                       break;
+                                                               if (kind > 19)
+                                                                       kind = 19;
+                                                               JjCheckNAddTwoStates(29, 30);
+                                                               break;
+                                                       
+                                                       case 30: 
+                                                               if (curChar == 92)
+                                                                       JjCheckNAddTwoStates(31, 31);
+                                                               break;
+                                                       
+                                                       case 31: 
+                                                               if (kind > 19)
+                                                                       kind = 19;
+                                                               JjCheckNAddTwoStates(29, 30);
+                                                               break;
+                                                       
+                                                       case 32: 
+                                                               if ((0x97ffffff87ffffffL & l) != (ulong) 0L)
+                                                                       JjCheckNAddStates(10, 12);
+                                                               break;
+                                                       
+                                                       case 33: 
+                                                               if (curChar == 92)
+                                                                       JjCheckNAddTwoStates(34, 34);
+                                                               break;
+                                                       
+                                                       case 34: 
+                                                               JjCheckNAddStates(10, 12);
+                                                               break;
+                                                       
+                                                       case 35: 
+                                                               if (curChar == 92)
+                                                                       JjCheckNAddStates(13, 15);
+                                                               break;
+                                                       
+                                                       default:  break;
+                                                       
+                                               }
+                                       }
+                                       while (i != startsAt);
+                               }
+                               else
+                               {
+                                       int hiByte = (int) (curChar >> 8);
+                                       int i1 = hiByte >> 6;
+                                       ulong l1 = (ulong) (1L << (hiByte & 63));
+                                       int i2 = (curChar & 0xff) >> 6;
+                                       ulong l2 = (ulong) (1L << (curChar & 63));
+                                       do 
+                                       {
+                                               switch (jjstateSet[--i])
+                                               {
+                                                       
+                                                       case 36: 
+                                                       case 25: 
+                                                               if (!JjCanMove_2(hiByte, i1, i2, l1, l2))
+                                                                       break;
+                                                               if (kind > 22)
+                                                                       kind = 22;
+                                                               JjCheckNAddTwoStates(25, 26);
+                                                               break;
+                                                       
+                                                       case 0: 
+                                                               if (JjCanMove_0(hiByte, i1, i2, l1, l2))
+                                                               {
+                                                                       if (kind > 7)
+                                                                               kind = 7;
+                                                               }
+                                                               if (JjCanMove_2(hiByte, i1, i2, l1, l2))
+                                                               {
+                                                                       if (kind > 22)
+                                                                               kind = 22;
+                                                                       JjCheckNAddTwoStates(25, 26);
+                                                               }
+                                                               if (JjCanMove_2(hiByte, i1, i2, l1, l2))
+                                                               {
+                                                                       if (kind > 19)
+                                                                               kind = 19;
+                                                                       JjCheckNAddStates(3, 7);
+                                                               }
+                                                               break;
+                                                       
+                                                       case 15: 
+                                                       case 17: 
+                                                               if (JjCanMove_1(hiByte, i1, i2, l1, l2))
+                                                                       JjCheckNAddStates(0, 2);
+                                                               break;
+                                                       
+                                                       case 24: 
+                                                               if (!JjCanMove_2(hiByte, i1, i2, l1, l2))
+                                                                       break;
+                                                               if (kind > 22)
+                                                                       kind = 22;
+                                                               JjCheckNAddTwoStates(25, 26);
+                                                               break;
+                                                       
+                                                       case 27: 
+                                                               if (!JjCanMove_1(hiByte, i1, i2, l1, l2))
+                                                                       break;
+                                                               if (kind > 22)
+                                                                       kind = 22;
+                                                               JjCheckNAddTwoStates(25, 26);
+                                                               break;
+                                                       
+                                                       case 28: 
+                                                               if (!JjCanMove_2(hiByte, i1, i2, l1, l2))
+                                                                       break;
+                                                               if (kind > 19)
+                                                                       kind = 19;
+                                                               JjCheckNAddStates(3, 7);
+                                                               break;
+                                                       
+                                                       case 29: 
+                                                               if (!JjCanMove_2(hiByte, i1, i2, l1, l2))
+                                                                       break;
+                                                               if (kind > 19)
+                                                                       kind = 19;
+                                                               JjCheckNAddTwoStates(29, 30);
+                                                               break;
+                                                       
+                                                       case 31: 
+                                                               if (!JjCanMove_1(hiByte, i1, i2, l1, l2))
+                                                                       break;
+                                                               if (kind > 19)
+                                                                       kind = 19;
+                                                               JjCheckNAddTwoStates(29, 30);
+                                                               break;
+                                                       
+                                                       case 32: 
+                                                               if (JjCanMove_2(hiByte, i1, i2, l1, l2))
+                                                                       JjCheckNAddStates(10, 12);
+                                                               break;
+                                                       
+                                                       case 34: 
+                                                               if (JjCanMove_1(hiByte, i1, i2, l1, l2))
+                                                                       JjCheckNAddStates(10, 12);
+                                                               break;
+                                                       
+                                                       default:  break;
+                                                       
+                                               }
+                                       }
+                                       while (i != startsAt);
+                               }
+                               if (kind != 0x7fffffff)
+                               {
+                                       jjmatchedKind = kind;
+                                       jjmatchedPos = curPos;
+                                       kind = 0x7fffffff;
+                               }
+                               ++curPos;
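+                               // The test below swaps the two halves of jjstateSet (a 2 x 36 double
+                               // buffer of active NFA states, standard JavaCC layout) and stops once
+                               // no state survived the character just consumed.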
+                               if ((i = jjnewStateCnt) == (startsAt = 36 - (jjnewStateCnt = startsAt)))
+                                       return curPos;
+                               try
+                               {
+                                       curChar = input_stream.ReadChar();
+                               }
+                               catch (System.IO.IOException)
+                               {
+                                       return curPos;
+                               }
+                       }
+               }
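+               // JavaCC convention: JjStopStringLiteralDfa_* chooses the NFA state to fall
+               // back to when literal matching is abandoned at position pos, recording a
+               // tentative jjmatchedKind; active0 is a bitmask over the literal token kinds
+               // still viable (here bit 30, the "TO" keyword).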
+               private int JjStopStringLiteralDfa_1(int pos, long active0)
+               {
+                       switch (pos)
+                       {
+                               
+                               case 0: 
+                                       if ((active0 & 0x40000000L) != 0L)
+                                       {
+                                               jjmatchedKind = 33;
+                                               return 6;
+                                       }
+                                       return -1;
+                               
+                               default: 
+                                       return -1;
+                               
+                       }
+               }
+               private int JjStartNfa_1(int pos, long active0)
+               {
+                       return JjMoveNfa_1(JjStopStringLiteralDfa_1(pos, active0), pos + 1);
+               }
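+               // The JjMoveStringLiteralDfa*_1 methods match the fixed images of lexical
+               // state 1 ("RangeEx") character by character: '}' (125) ends the range token
+               // via JjStopAtPos, and a completed "TO" hands over to the NFA through
+               // JjStartNfaWithStates_1.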
+               private int JjMoveStringLiteralDfa0_1()
+               {
+                       switch (curChar)
+                       {
+                               
+                               case (char) (84): 
+                                       return JjMoveStringLiteralDfa1_1(0x40000000L);
+                               
+                               case (char) (125): 
+                                       return JjStopAtPos(0, 31);
+                               
+                               default: 
+                                       return JjMoveNfa_1(0, 0);
+                               
+                       }
+               }
+               private int JjMoveStringLiteralDfa1_1(long active0)
+               {
+                       try
+                       {
+                               curChar = input_stream.ReadChar();
+                       }
+                       catch (System.IO.IOException)
+                       {
+                               JjStopStringLiteralDfa_1(0, active0);
+                               return 1;
+                       }
+                       switch (curChar)
+                       {
+                               
+                               case (char) (79): 
+                                       if ((active0 & 0x40000000L) != 0L)
+                                               return JjStartNfaWithStates_1(1, 30, 6);
+                                       break;
+                               
+                               default: 
+                                       break;
+                               
+                       }
+                       return JjStartNfa_1(0, active0);
+               }
+               private int JjStartNfaWithStates_1(int pos, int kind, int state)
+               {
+                       jjmatchedKind = kind;
+                       jjmatchedPos = pos;
+                       try
+                       {
+                               curChar = input_stream.ReadChar();
+                       }
+                       catch (System.IO.IOException)
+                       {
+                               return pos + 1;
+                       }
+                       return JjMoveNfa_1(state, pos + 1);
+               }
+               private int JjMoveNfa_1(int startState, int curPos)
+               {
+                       int startsAt = 0;
+                       jjnewStateCnt = 7;
+                       int i = 1;
+                       jjstateSet[0] = startState;
+                       int kind = 0x7fffffff;
+                       for (; ; )
+                       {
+                               if (++jjround == 0x7fffffff)
+                                       ReInitRounds();
+                               if (curChar < 64)
+                               {
+                                       ulong l = (ulong) (1L << (int) curChar);
+                                       do 
+                                       {
+                                               switch (jjstateSet[--i])
+                                               {
+                                                       
+                                                       case 0: 
+                                                               if ((0xfffffffeffffffffL & l) != (ulong) 0L)
+                                                               {
+                                                                       if (kind > 33)
+                                                                               kind = 33;
+                                                                       JjCheckNAdd(6);
+                                                               }
+                                                               if ((0x100002600L & l) != 0L)
+                                                               {
+                                                                       if (kind > 7)
+                                                                               kind = 7;
+                                                               }
+                                                               else if (curChar == 34)
+                                                                       JjCheckNAddTwoStates(2, 4);
+                                                               break;
+                                                       
+                                                       case 1: 
+                                                               if (curChar == 34)
+                                                                       JjCheckNAddTwoStates(2, 4);
+                                                               break;
+                                                       
+                                                       case 2: 
+                                                               if ((0xfffffffbffffffffL & l) != (ulong) 0L)
+                                                                       JjCheckNAddStates(16, 18);
+                                                               break;
+                                                       
+                                                       case 3: 
+                                                               if (curChar == 34)
+                                                                       JjCheckNAddStates(16, 18);
+                                                               break;
+                                                       
+                                                       case 5: 
+                                                               if (curChar == 34 && kind > 32)
+                                                                       kind = 32;
+                                                               break;
+                                                       
+                                                       case 6: 
+                                                               if ((0xfffffffeffffffffL & l) == (ulong) 0L)
+                                                                       break;
+                                                               if (kind > 33)
+                                                                       kind = 33;
+                                                               JjCheckNAdd(6);
+                                                               break;
+                                                       
+                                                       default:  break;
+                                                       
+                                               }
+                                       }
+                                       while (i != startsAt);
+                               }
+                               else if (curChar < 128)
+                               {
+                                       ulong l = (ulong) (1L << (curChar & 63));
+                                       do 
+                                       {
+                                               switch (jjstateSet[--i])
+                                               {
+                                                       
+                                                       case 0: 
+                                                       case 6: 
+                                                               if ((0xdfffffffffffffffL & l) == (ulong) 0L)
+                                                                       break;
+                                                               if (kind > 33)
+                                                                       kind = 33;
+                                                               JjCheckNAdd(6);
+                                                               break;
+                                                       
+                                                       case 2: 
+                                                               JjAddStates(16, 18);
+                                                               break;
+                                                       
+                                                       case 4: 
+                                                               if (curChar == 92)
+                                                                       jjstateSet[jjnewStateCnt++] = 3;
+                                                               break;
+                                                       
+                                                       default:  break;
+                                                       
+                                               }
+                                       }
+                                       while (i != startsAt);
+                               }
+                               else
+                               {
+                                       int hiByte = (int) (curChar >> 8);
+                                       int i1 = hiByte >> 6;
+                                       ulong l1 = (ulong) (1L << (hiByte & 63));
+                                       int i2 = (curChar & 0xff) >> 6;
+                                       ulong l2 = (ulong) (1L << (curChar & 63));
+                                       do 
+                                       {
+                                               switch (jjstateSet[--i])
+                                               {
+                                                       
+                                                       case 0: 
+                                                               if (JjCanMove_0(hiByte, i1, i2, l1, l2))
+                                                               {
+                                                                       if (kind > 7)
+                                                                               kind = 7;
+                                                               }
+                                                               if (JjCanMove_1(hiByte, i1, i2, l1, l2))
+                                                               {
+                                                                       if (kind > 33)
+                                                                               kind = 33;
+                                                                       JjCheckNAdd(6);
+                                                               }
+                                                               break;
+                                                       
+                                                       case 2: 
+                                                               if (JjCanMove_1(hiByte, i1, i2, l1, l2))
+                                                                       JjAddStates(16, 18);
+                                                               break;
+                                                       
+                                                       case 6: 
+                                                               if (!JjCanMove_1(hiByte, i1, i2, l1, l2))
+                                                                       break;
+                                                               if (kind > 33)
+                                                                       kind = 33;
+                                                               JjCheckNAdd(6);
+                                                               break;
+                                                       
+                                                       default:  break;
+                                                       
+                                               }
+                                       }
+                                       while (i != startsAt);
+                               }
+                               if (kind != 0x7fffffff)
+                               {
+                                       jjmatchedKind = kind;
+                                       jjmatchedPos = curPos;
+                                       kind = 0x7fffffff;
+                               }
+                               ++curPos;
+                               if ((i = jjnewStateCnt) == (startsAt = 7 - (jjnewStateCnt = startsAt)))
+                                       return curPos;
+                               try
+                               {
+                                       curChar = input_stream.ReadChar();
+                               }
+                               catch (System.IO.IOException)
+                               {
+                                       return curPos;
+                               }
+                       }
+               }
+               private int JjMoveStringLiteralDfa0_0()
+               {
+                       return JjMoveNfa_0(0, 0);
+               }
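+               // Lexical state 0 ("Boost") is pure NFA: the 0x3ff000000000000L masks select
+               // the digit characters 48-57, so this machine accepts a decimal number
+               // (kind 25) with an optional fractional part after '.' (character 46).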
+               private int JjMoveNfa_0(int startState, int curPos)
+               {
+                       int startsAt = 0;
+                       jjnewStateCnt = 3;
+                       int i = 1;
+                       jjstateSet[0] = startState;
+                       int kind = 0x7fffffff;
+                       for (; ; )
+                       {
+                               if (++jjround == 0x7fffffff)
+                                       ReInitRounds();
+                               if (curChar < 64)
+                               {
+                                       ulong l = (ulong) (1L << (int) curChar);
+                                       do 
+                                       {
+                                               switch (jjstateSet[--i])
+                                               {
+                                                       
+                                                       case 0: 
+                                                               if ((0x3ff000000000000L & l) == 0L)
+                                                                       break;
+                                                               if (kind > 25)
+                                                                       kind = 25;
+                                                               JjAddStates(19, 20);
+                                                               break;
+                                                       
+                                                       case 1: 
+                                                               if (curChar == 46)
+                                                                       JjCheckNAdd(2);
+                                                               break;
+                                                       
+                                                       case 2: 
+                                                               if ((0x3ff000000000000L & l) == 0L)
+                                                                       break;
+                                                               if (kind > 25)
+                                                                       kind = 25;
+                                                               JjCheckNAdd(2);
+                                                               break;
+                                                       
+                                                       default:  break;
+                                                       
+                                               }
+                                       }
+                                       while (i != startsAt);
+                               }
+                               else if (curChar < 128)
+                               {
+                                       ulong l = (ulong) (1L << (curChar & 63));
+                                       do 
+                                       {
+                                               switch (jjstateSet[--i])
+                                               {
+                                                       
+                                                       default:  break;
+                                                       
+                                               }
+                                       }
+                                       while (i != startsAt);
+                               }
+                               else
+                               {
+                                       int hiByte = (int) (curChar >> 8);
+                                       int i1 = hiByte >> 6;
+                                       ulong l1 = (ulong) (1L << (hiByte & 63));
+                                       int i2 = (curChar & 0xff) >> 6;
+                                       ulong l2 = (ulong) (1L << (curChar & 63));
+                                       do 
+                                       {
+                                               switch (jjstateSet[--i])
+                                               {
+                                                       
+                                                       default:  break;
+                                                       
+                                               }
+                                       }
+                                       while (i != startsAt);
+                               }
+                               if (kind != 0x7fffffff)
+                               {
+                                       jjmatchedKind = kind;
+                                       jjmatchedPos = curPos;
+                                       kind = 0x7fffffff;
+                               }
+                               ++curPos;
+                               if ((i = jjnewStateCnt) == (startsAt = 3 - (jjnewStateCnt = startsAt)))
+                                       return curPos;
+                               try
+                               {
+                                       curChar = input_stream.ReadChar();
+                               }
+                               catch (System.IO.IOException)
+                               {
+                                       return curPos;
+                               }
+                       }
+               }
+               private int JjStopStringLiteralDfa_2(int pos, long active0)
+               {
+                       switch (pos)
+                       {
+                               
+                               case 0: 
+                                       if ((active0 & 0x4000000L) != 0L)
+                                       {
+                                               jjmatchedKind = 29;
+                                               return 6;
+                                       }
+                                       return -1;
+                               
+                               default: 
+                                       return -1;
+                               
+                       }
+               }
+               private int JjStartNfa_2(int pos, long active0)
+               {
+                       return JjMoveNfa_2(JjStopStringLiteralDfa_2(pos, active0), pos + 1);
+               }
+               private int JjMoveStringLiteralDfa0_2()
+               {
+                       switch (curChar)
+                       {
+                               
+                               case (char) (84): 
+                                       return JjMoveStringLiteralDfa1_2(0x4000000L);
+                               
+                               case (char) (93): 
+                                       return JjStopAtPos(0, 27);
+                               
+                               default: 
+                                       return JjMoveNfa_2(0, 0);
+                               
+                       }
+               }
+               private int JjMoveStringLiteralDfa1_2(long active0)
+               {
+                       try
+                       {
+                               curChar = input_stream.ReadChar();
+                       }
+                       catch (System.IO.IOException)
+                       {
+                               JjStopStringLiteralDfa_2(0, active0);
+                               return 1;
+                       }
+                       switch (curChar)
+                       {
+                               
+                               case (char) (79): 
+                                       if ((active0 & 0x4000000L) != 0L)
+                                               return JjStartNfaWithStates_2(1, 26, 6);
+                                       break;
+                               
+                               default: 
+                                       break;
+                               
+                       }
+                       return JjStartNfa_2(0, active0);
+               }
+               private int JjStartNfaWithStates_2(int pos, int kind, int state)
+               {
+                       jjmatchedKind = kind;
+                       jjmatchedPos = pos;
+                       try
+                       {
+                               curChar = input_stream.ReadChar();
+                       }
+                       catch (System.IO.IOException)
+                       {
+                               return pos + 1;
+                       }
+                       return JjMoveNfa_2(state, pos + 1);
+               }
+               private int JjMoveNfa_2(int startState, int curPos)
+               {
+                       int startsAt = 0;
+                       jjnewStateCnt = 7;
+                       int i = 1;
+                       jjstateSet[0] = startState;
+                       int kind = 0x7fffffff;
+                       for (; ; )
+                       {
+                               if (++jjround == 0x7fffffff)
+                                       ReInitRounds();
+                               if (curChar < 64)
+                               {
+                                       ulong l = (ulong) (1L << (int) curChar);
+                                       do 
+                                       {
+                                               switch (jjstateSet[--i])
+                                               {
+                                                       
+                                                       case 0: 
+                                                               if ((0xfffffffeffffffffL & l) != (ulong) 0L)
+                                                               {
+                                                                       if (kind > 29)
+                                                                               kind = 29;
+                                                                       JjCheckNAdd(6);
+                                                               }
+                                                               if ((0x100002600L & l) != 0L)
+                                                               {
+                                                                       if (kind > 7)
+                                                                               kind = 7;
+                                                               }
+                                                               else if (curChar == 34)
+                                                                       JjCheckNAddTwoStates(2, 4);
+                                                               break;
+                                                       
+                                                       case 1: 
+                                                               if (curChar == 34)
+                                                                       JjCheckNAddTwoStates(2, 4);
+                                                               break;
+                                                       
+                                                       case 2: 
+                                                               if ((0xfffffffbffffffffL & l) != (ulong) 0L)
+                                                                       JjCheckNAddStates(16, 18);
+                                                               break;
+                                                       
+                                                       case 3: 
+                                                               if (curChar == 34)
+                                                                       JjCheckNAddStates(16, 18);
+                                                               break;
+                                                       
+                                                       case 5: 
+                                                               if (curChar == 34 && kind > 28)
+                                                                       kind = 28;
+                                                               break;
+                                                       
+                                                       case 6: 
+                                                               if ((0xfffffffeffffffffL & l) == (ulong) 0L)
+                                                                       break;
+                                                               if (kind > 29)
+                                                                       kind = 29;
+                                                               JjCheckNAdd(6);
+                                                               break;
+                                                       
+                                                       default:  break;
+                                                       
+                                               }
+                                       }
+                                       while (i != startsAt);
+                               }
+                               else if (curChar < 128)
+                               {
+                                       ulong l = (ulong) (1L << (curChar & 63));
+                                       do 
+                                       {
+                                               switch (jjstateSet[--i])
+                                               {
+                                                       
+                                                       case 0: 
+                                                       case 6: 
+                                                               if ((0xffffffffdfffffffL & l) == (ulong) 0L)
+                                                                       break;
+                                                               if (kind > 29)
+                                                                       kind = 29;
+                                                               JjCheckNAdd(6);
+                                                               break;
+                                                       
+                                                       case 2: 
+                                                               JjAddStates(16, 18);
+                                                               break;
+                                                       
+                                                       case 4: 
+                                                               if (curChar == 92)
+                                                                       jjstateSet[jjnewStateCnt++] = 3;
+                                                               break;
+                                                       
+                                                       default:  break;
+                                                       
+                                               }
+                                       }
+                                       while (i != startsAt);
+                               }
+                               else
+                               {
+                                       int hiByte = (int) (curChar >> 8);
+                                       int i1 = hiByte >> 6;
+                                       ulong l1 = (ulong) (1L << (hiByte & 63));
+                                       int i2 = (curChar & 0xff) >> 6;
+                                       ulong l2 = (ulong) (1L << (curChar & 63));
+                                       do 
+                                       {
+                                               switch (jjstateSet[--i])
+                                               {
+                                                       
+                                                       case 0: 
+                                                               if (JjCanMove_0(hiByte, i1, i2, l1, l2))
+                                                               {
+                                                                       if (kind > 7)
+                                                                               kind = 7;
+                                                               }
+                                                               if (JjCanMove_1(hiByte, i1, i2, l1, l2))
+                                                               {
+                                                                       if (kind > 29)
+                                                                               kind = 29;
+                                                                       JjCheckNAdd(6);
+                                                               }
+                                                               break;
+                                                       
+                                                       case 2: 
+                                                               if (JjCanMove_1(hiByte, i1, i2, l1, l2))
+                                                                       JjAddStates(16, 18);
+                                                               break;
+                                                       
+                                                       case 6: 
+                                                               if (!JjCanMove_1(hiByte, i1, i2, l1, l2))
+                                                                       break;
+                                                               if (kind > 29)
+                                                                       kind = 29;
+                                                               JjCheckNAdd(6);
+                                                               break;
+                                                       
+                                                       default:  break;
+                                                       
+                                               }
+                                       }
+                                       while (i != startsAt);
+                               }
+                               if (kind != 0x7fffffff)
+                               {
+                                       jjmatchedKind = kind;
+                                       jjmatchedPos = curPos;
+                                       kind = 0x7fffffff;
+                               }
+                               ++curPos;
+                               if ((i = jjnewStateCnt) == (startsAt = 7 - (jjnewStateCnt = startsAt)))
+                                       return curPos;
+                               try
+                               {
+                                       curChar = input_stream.ReadChar();
+                               }
+                               catch (System.IO.IOException)
+                               {
+                                       return curPos;
+                               }
+                       }
+               }
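+               // Flattened table of NFA state groups; JjAddStates/JjCheckNAddStates index
+               // into it by inclusive (start, end) offsets, e.g. offsets 0-2 name the
+               // states {15, 16, 18}.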
+               internal static readonly int[] jjnextStates = new int[]{15, 16, 18, 29, 32, 23, 33, 30, 20, 21, 32, 23, 33, 31, 34, 27, 2, 4, 5, 0, 1};
+               private static bool JjCanMove_0(int hiByte, int i1, int i2, ulong l1, ulong l2)
+               {
+                       switch (hiByte)
+                       {
+                               
+                               case 48: 
+                                       return ((jjbitVec0[i2] & l2) != (ulong) 0L);
+                               
+                               default: 
+                                       return false;
+                               
+                       }
+               }
+               private static bool JjCanMove_1(int hiByte, int i1, int i2, ulong l1, ulong l2)
+               {
+                       switch (hiByte)
+                       {
+                               
+                               case 0: 
+                                       return ((jjbitVec3[i2] & l2) != (ulong) 0L);
+                               
+                               default: 
+                                       if ((jjbitVec1[i1] & l1) != (ulong) 0L)
+                                               return true;
+                                       return false;
+                               
+                       }
+               }
+               private static bool JjCanMove_2(int hiByte, int i1, int i2, ulong l1, ulong l2)
+               {
+                       switch (hiByte)
+                       {
+                               
+                               case 0: 
+                                       return ((jjbitVec3[i2] & l2) != (ulong) 0L);
+                               
+                               case 48: 
+                                       return ((jjbitVec1[i2] & l2) != (ulong) 0L);
+                               
+                               default: 
+                                       if ((jjbitVec4[i1] & l1) != (ulong) 0L)
+                                               return true;
+                                       return false;
+                               
+                       }
+               }
+               
+               /// <summary>Token literal values. </summary>
+               public static readonly System.String[] jjstrLiteralImages = new System.String[]{"", null, null, null, null, null, null, null, null, null, null, "\x002B", "\x002D", "\x0028", "\x0029", "\x003A", "\x002A", "\x005E", null, null, null, null, null, "\x005B", "\x007B", null, "\x0054\x004F", "\x005D", null, null, "\x0054\x004F", "\x007D", null, null};
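+               // (The escaped images decode to plain strings, e.g. "\x002B" is "+" and
+               // "\x0054\x004F" is "TO"; null entries belong to kinds such as terms and
+               // quoted strings that have no fixed image.)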
+               
+               /// <summary>Lexer state names. </summary>
+               public static readonly System.String[] lexStateNames = new System.String[]{"Boost", "RangeEx", "RangeIn", "DEFAULT"};
+               
+               /// <summary>Lex State array. </summary>
+               public static readonly int[] jjnewLexState = new int[]{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, 2, 1, 3, -1, 3, -1, -1, -1, 3, -1, -1};
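+               // Bitmasks over token kinds: a set bit in jjtoToken means GetNextToken
+               // returns a Token for that kind; jjtoSkip (bit 7, the whitespace kind) marks
+               // kinds that are discarded, per standard JavaCC skip handling.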
+               internal static readonly ulong[] jjtoToken = new ulong[]{0x3ffffff01L};
+               internal static readonly long[] jjtoSkip = new long[]{0x80L};
+               protected internal CharStream input_stream;
+               private uint[] jjrounds = new uint[36];
+               private int[] jjstateSet = new int[72];
+               protected internal char curChar;
+               /// <summary>Constructor. </summary>
+               public QueryParserTokenManager(CharStream stream)
+               {
+                       InitBlock();
+                       input_stream = stream;
+               }
+               
+               /// <summary>Constructor. </summary>
+               public QueryParserTokenManager(CharStream stream, int lexState) : this(stream)
+               {
+                       SwitchTo(lexState);
+               }
+               
+               /// <summary>Reinitialise parser. </summary>
+               public virtual void ReInit(CharStream stream)
+               {
+                       jjmatchedPos = jjnewStateCnt = 0;
+                       curLexState = defaultLexState;
+                       input_stream = stream;
+                       ReInitRounds();
+               }
+               private void ReInitRounds()
+               {
+                       int i;
+                       jjround = 0x80000001;
+                       for (i = 36; i-- > 0; )
+                               jjrounds[i] = 0x80000000;
+               }
+               
+               /// <summary>Reinitialise parser. </summary>
+               public virtual void ReInit(CharStream stream, int lexState)
+               {
+                       ReInit(stream);
+                       SwitchTo(lexState);
+               }
+               
+               /// <summary>Switch to specified lex state. </summary>
+               public virtual void SwitchTo(int lexState)
+               {
+                       if (lexState >= 4 || lexState < 0)
+                               throw new TokenMgrError("Error: Ignoring invalid lexical state: " + lexState + ". State unchanged.", TokenMgrError.INVALID_LEXICAL_STATE);
+                       else
+                               curLexState = lexState;
+               }
+               
+               protected internal virtual Token JjFillToken()
+               {
+                       Token t;
+                       System.String curTokenImage;
+                       int beginLine;
+                       int endLine;
+                       int beginColumn;
+                       int endColumn;
+                       System.String im = jjstrLiteralImages[jjmatchedKind];
+                       curTokenImage = (im == null) ? input_stream.GetImage() : im;
+                       beginLine = input_stream.GetBeginLine();
+                       beginColumn = input_stream.GetBeginColumn();
+                       endLine = input_stream.GetEndLine();
+                       endColumn = input_stream.GetEndColumn();
+                       t = Token.NewToken(jjmatchedKind, curTokenImage);
+                       
+                       t.beginLine = beginLine;
+                       t.endLine = endLine;
+                       t.beginColumn = beginColumn;
+                       t.endColumn = endColumn;
+                       
+                       return t;
+               }
+               
+               internal int curLexState = 3;
+               internal int defaultLexState = 3;
+               internal int jjnewStateCnt;
+               internal uint jjround;
+               internal int jjmatchedPos;
+               internal int jjmatchedKind;
+               
+               /// <summary>Get the next Token. </summary>
+               public virtual Token GetNextToken()
+               {
+                       Token matchedToken;
+                       int curPos = 0;
+                       
+                       for (; ; )
+                       {
+                               try
+                               {
+                                       curChar = input_stream.BeginToken();
+                               }
+                               catch (System.IO.IOException)
+                               {
+                                       jjmatchedKind = 0;
+                                       matchedToken = JjFillToken();
+                                       return matchedToken;
+                               }
+                               
+                               switch (curLexState)
+                               {
+                                       
+                                       case 0: 
+                                               jjmatchedKind = 0x7fffffff;
+                                               jjmatchedPos = 0;
+                                               curPos = JjMoveStringLiteralDfa0_0();
+                                               break;
+                                       
+                                       case 1: 
+                                               jjmatchedKind = 0x7fffffff;
+                                               jjmatchedPos = 0;
+                                               curPos = JjMoveStringLiteralDfa0_1();
+                                               break;
+                                       
+                                       case 2: 
+                                               jjmatchedKind = 0x7fffffff;
+                                               jjmatchedPos = 0;
+                                               curPos = JjMoveStringLiteralDfa0_2();
+                                               break;
+                                       
+                                       case 3: 
+                                               jjmatchedKind = 0x7fffffff;
+                                               jjmatchedPos = 0;
+                                               curPos = JjMoveStringLiteralDfa0_3();
+                                               break;
+                                       }
+                               if (jjmatchedKind != 0x7fffffff)
+                               {
+                                       if (jjmatchedPos + 1 < curPos)
+                                               input_stream.Backup(curPos - jjmatchedPos - 1);
+                                       if ((jjtoToken[jjmatchedKind >> 6] & ((ulong) 1L << (jjmatchedKind & 63))) != (ulong) 0L)
+                                       {
+                                               matchedToken = JjFillToken();
+                                               if (jjnewLexState[jjmatchedKind] != -1)
+                                                       curLexState = jjnewLexState[jjmatchedKind];
+                                               return matchedToken;
+                                       }
+                                       else
+                                       {
+                                               if (jjnewLexState[jjmatchedKind] != -1)
+                                                       curLexState = jjnewLexState[jjmatchedKind];
+                                               goto EOFLoop;
+                                       }
+                               }
+                               int error_line = input_stream.GetEndLine();
+                               int error_column = input_stream.GetEndColumn();
+                               System.String error_after = null;
+                               bool EOFSeen = false;
+                               try
+                               {
+                                       input_stream.ReadChar(); input_stream.Backup(1);
+                               }
+                               catch (System.IO.IOException)
+                               {
+                                       EOFSeen = true;
+                                       error_after = curPos <= 1?"":input_stream.GetImage();
+                                       if (curChar == '\n' || curChar == '\r')
+                                       {
+                                               error_line++;
+                                               error_column = 0;
+                                       }
+                                       else
+                                               error_column++;
+                               }
+                               if (!EOFSeen)
+                               {
+                                       input_stream.Backup(1);
+                                       error_after = curPos <= 1?"":input_stream.GetImage();
+                               }
+                               throw new TokenMgrError(EOFSeen, curLexState, error_line, error_column, error_after, curChar, TokenMgrError.LEXICAL_ERROR);
+
+EOFLoop: ;
+                       }
+               }
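+
+               // Typical driver loop (a sketch: FastCharStream is one CharStream
+               // implementation in this port, and kind 0 is the JavaCC EOF token):
+               //
+               //   var tm = new QueryParserTokenManager(new FastCharStream(reader));
+               //   for (Token t = tm.GetNextToken(); t.kind != 0; t = tm.GetNextToken())
+               //       System.Console.WriteLine(t.kind + " -> " + t.image);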
+               
+               private void  JjCheckNAdd(int state)
+               {
+                       if (jjrounds[state] != jjround)
+                       {
+                               jjstateSet[jjnewStateCnt++] = state;
+                               jjrounds[state] = jjround;
+                       }
+               }
+               private void  JjAddStates(int start, int end)
+               {
+                       do 
+                       {
+                               jjstateSet[jjnewStateCnt++] = jjnextStates[start];
+                       }
+                       while (start++ != end);
+               }
+               private void  JjCheckNAddTwoStates(int state1, int state2)
+               {
+                       JjCheckNAdd(state1);
+                       JjCheckNAdd(state2);
+               }
+               
+               private void  JjCheckNAddStates(int start, int end)
+               {
+                       do 
+                       {
+                               JjCheckNAdd(jjnextStates[start]);
+                       }
+                       while (start++ != end);
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/Token.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/Token.cs
new file mode 100644 (file)
index 0000000..5e6206b
--- /dev/null
@@ -0,0 +1,133 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Generated By:JavaCC: Do not edit this line. Token.java Version 4.1 */
+/* JavaCCOptions:TOKEN_EXTENDS=,KEEP_LINE_COL=null */
+
+using System;
+
+namespace Mono.Lucene.Net.QueryParsers
+{
+       
+       /// <summary> Describes the input token stream.</summary>
+       
+       public class Token
+       {
+               
+               /// <summary> An integer that describes the kind of this token.  This numbering
+               /// system is determined by JavaCCParser, and a table of these numbers is
+               /// stored in the file ...Constants.java.
+               /// </summary>
+               public int kind;
+               
+               /// <summary>The line number of the first character of this Token. </summary>
+               public int beginLine;
+               /// <summary>The column number of the first character of this Token. </summary>
+               public int beginColumn;
+               /// <summary>The line number of the last character of this Token. </summary>
+               public int endLine;
+               /// <summary>The column number of the last character of this Token. </summary>
+               public int endColumn;
+               
+               /// <summary> The string image of the token.</summary>
+               public System.String image;
+               
+               /// <summary> A reference to the next regular (non-special) token from the input
+               /// stream.  If this is the last token from the input stream, or if the
+               /// token manager has not read tokens beyond this one, this field is
+               /// set to null.  This is true only if this token is also a regular
+               /// token.  Otherwise, see below for a description of the contents of
+               /// this field.
+               /// </summary>
+               public Token next;
+               
+               /// <summary> This field is used to access special tokens that occur prior to this
+               /// token, but after the immediately preceding regular (non-special) token.
+               /// If there are no such special tokens, this field is set to null.
+               /// When there is more than one such special token, this field refers
+               /// to the last of these special tokens, which in turn refers to the next
+               /// previous special token through its specialToken field, and so on
+               /// until the first special token (whose specialToken field is null).
+               /// The next fields of special tokens refer to other special tokens that
+               /// immediately follow it (without an intervening regular token).  If there
+               /// is no such token, this field is null.
+               /// </summary>
+               public Token specialToken;
+               
+               /// <summary> An optional attribute value of the Token.
+               /// Tokens which are not used as syntactic sugar will often contain
+               /// meaningful values that will be used later on by the compiler or
+               /// interpreter. This attribute value is often different from the image.
+               /// Any subclass of Token that actually wants to return a non-null value can
+               /// override this method as appropriate.
+               /// </summary>
+               public virtual System.Object GetValue()
+               {
+                       return null;
+               }
+               
+               /// <summary> No-argument constructor</summary>
+               public Token()
+               {
+               }
+               
+               /// <summary> Constructs a new token for the specified Image.</summary>
+               public Token(int kind):this(kind, null)
+               {
+               }
+               
+               /// <summary> Constructs a new token for the specified Image and Kind.</summary>
+               public Token(int kind, System.String image)
+               {
+                       this.kind = kind;
+                       this.image = image;
+               }
+               
+               /// <summary> Returns the image.</summary>
+               public override System.String ToString()
+               {
+                       return image;
+               }
+               
+               /// <summary> Returns a new Token object, by default. However, if you want, you
+               /// can create and return subclass objects based on the value of ofKind.
+               /// Simply add the cases to the switch for all those special cases.
+               /// For example, if you have a subclass of Token called IDToken that
+               /// you want to create if ofKind is ID, simply add something like:
+               /// 
+               /// case MyParserConstants.ID : return new IDToken(ofKind, image);
+               /// 
+               /// to the following switch statement. Then you can cast the matchedToken
+               /// variable to the appropriate type and use it in your lexical actions.
+               /// </summary>
+               public static Token NewToken(int ofKind, System.String image)
+               {
+                       switch (ofKind)
+                       {
+                               
+                               default:  return new Token(ofKind, image);
+                               
+                       }
+               }
+               
+               public static Token NewToken(int ofKind)
+               {
+                       return NewToken(ofKind, null);
+               }
+       }
+       /* JavaCC - OriginalChecksum=37b1923f964a5a434f5ea3d6952ff200 (do not edit this line) */
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/TokenMgrError.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/QueryParser/TokenMgrError.cs
new file mode 100644 (file)
index 0000000..0dbb1d6
--- /dev/null
@@ -0,0 +1,169 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Generated By:JavaCC: Do not edit this line. TokenMgrError.java Version 4.1 */
+/* JavaCCOptions: */
+
+using System;
+
+namespace Mono.Lucene.Net.QueryParsers
+{
+       
+       /// <summary>Token Manager Error. </summary>
+       [Serializable]
+       public class TokenMgrError:System.ApplicationException
+       {
+               /// <summary> You can also modify the body of this method to customize your error messages.
+               /// For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are not
+               /// of concern to end users, so you can return something like:
+               /// 
+               /// "Internal Error : Please file a bug report .... "
+               /// 
+               /// from this method for such cases in the release version of your parser.
+               /// </summary>
+               public override System.String Message
+               {
+                       get
+                       {
+                               return base.Message;
+                       }
+                       
+               }
+               
+               /*
+               * Ordinals for various reasons why an Error of this type can be thrown.
+               */
+               
+               /// <summary> Lexical error occurred.</summary>
+               internal const int LEXICAL_ERROR = 0;
+               
+               /// <summary> An attempt was made to create a second instance of a static token manager.</summary>
+               internal const int STATIC_LEXER_ERROR = 1;
+               
+               /// <summary> Tried to change to an invalid lexical state.</summary>
+               internal const int INVALID_LEXICAL_STATE = 2;
+               
+               /// <summary> Detected (and bailed out of) an infinite loop in the token manager.</summary>
+               internal const int LOOP_DETECTED = 3;
+               
+               /// <summary> Indicates the reason why the exception is thrown. It will have
+               /// one of the above 4 values.
+               /// </summary>
+               internal int errorCode;
+               
+               /// <summary> Replaces unprintable characters with their escaped (or Unicode-escaped)
+               /// equivalents in the given string.
+               /// </summary>
+               protected internal static System.String addEscapes(System.String str)
+               {
+                       System.Text.StringBuilder retval = new System.Text.StringBuilder();
+                       char ch;
+                       for (int i = 0; i < str.Length; i++)
+                       {
+                               switch (str[i])
+                               {
+                                       
+                                       case (char) (0): 
+                                               continue;
+                                       
+                                       case '\b': 
+                                               retval.Append("\\b");
+                                               continue;
+                                       
+                                       case '\t': 
+                                               retval.Append("\\t");
+                                               continue;
+                                       
+                                       case '\n': 
+                                               retval.Append("\\n");
+                                               continue;
+                                       
+                                       case '\f': 
+                                               retval.Append("\\f");
+                                               continue;
+                                       
+                                       case '\r': 
+                                               retval.Append("\\r");
+                                               continue;
+                                       
+                                       case '\"': 
+                                               retval.Append("\\\"");
+                                               continue;
+                                       
+                                       case '\'': 
+                                               retval.Append("\\\'");
+                                               continue;
+                                       
+                                       case '\\': 
+                                               retval.Append("\\\\");
+                                               continue;
+                                       
+                                       default: 
+                                               if ((ch = str[i]) < 0x20 || ch > 0x7e)
+                                               {
+                                                       System.String s = "0000" + System.Convert.ToString(ch, 16);
+                                                       retval.Append("\\u" + s.Substring(s.Length - 4, 4));
+                                               }
+                                               else
+                                               {
+                                                       retval.Append(ch);
+                                               }
+                                               continue;
+                                       
+                               }
+                       }
+                       return retval.ToString();
+               }
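+
+               // For example, addEscapes("a\tb\x01") returns the printable ten-character
+               // string a\tb\u0001: the tab becomes \t and the control character becomes
+               // a \uXXXX escape, so error messages never contain raw control characters.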
+               
+               /// <summary> Returns a detailed message for the Error when it is thrown by the
+               /// token manager to indicate a lexical error.
+               /// Parameters :
+               /// EOFSeen     : indicates if EOF caused the lexical error
+               /// curLexState : lexical state in which this error occurred
+               /// errorLine   : line number when the error occurred
+               /// errorColumn : column number when the error occurred
+               /// errorAfter  : prefix that was seen before this error occurred
+               /// curChar     : the offending character
+               /// Note: You can customize the lexical error message by modifying this method.
+               /// </summary>
+               protected internal static System.String LexicalError(bool EOFSeen, int lexState, int errorLine, int errorColumn, System.String errorAfter, char curChar)
+               {
+                       return ("Lexical error at line " + errorLine + ", column " + errorColumn + ".  Encountered: " + (EOFSeen?"<EOF> ":("\"" + addEscapes(System.Convert.ToString(curChar)) + "\"") + " (" + (int) curChar + "), ") + "after : \"" + addEscapes(errorAfter) + "\"");
+               }
+               
+               /*
+               * Constructors of various flavors follow.
+               */
+               
+               /// <summary>No arg constructor. </summary>
+               public TokenMgrError()
+               {
+               }
+               
+               /// <summary>Constructor with message and reason. </summary>
+               public TokenMgrError(System.String message, int reason):base(message)
+               {
+                       errorCode = reason;
+               }
+               
+               /// <summary>Full Constructor. </summary>
+               public TokenMgrError(bool EOFSeen, int lexState, int errorLine, int errorColumn, System.String errorAfter, char curChar, int reason):this(LexicalError(EOFSeen, lexState, errorLine, errorColumn, errorAfter, curChar), reason)
+               {
+               }
+       }
+       /* JavaCC - OriginalChecksum=55cddb2336a66b376c0bb59d916b326d (do not edit this line) */
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/.gitattributes b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/.gitattributes
new file mode 100644 (file)
index 0000000..491e411
--- /dev/null
@@ -0,0 +1,106 @@
+/BooleanClause.cs -crlf
+/BooleanQuery.cs -crlf
+/BooleanScorer.cs -crlf
+/BooleanScorer2.cs -crlf
+/CachingSpanFilter.cs -crlf
+/CachingWrapperFilter.cs -crlf
+/Collector.cs -crlf
+/ComplexExplanation.cs -crlf
+/ConjunctionScorer.cs -crlf
+/ConstantScoreQuery.cs -crlf
+/ConstantScoreRangeQuery.cs -crlf
+/DefaultSimilarity.cs -crlf
+/DisjunctionMaxQuery.cs -crlf
+/DisjunctionMaxScorer.cs -crlf
+/DisjunctionSumScorer.cs -crlf
+/DocIdSet.cs -crlf
+/DocIdSetIterator.cs -crlf
+/ExactPhraseScorer.cs -crlf
+/Explanation.cs -crlf
+/ExtendedFieldCache.cs -crlf
+/FieldCache.cs -crlf
+/FieldCacheImpl.cs -crlf
+/FieldCacheRangeFilter.cs -crlf
+/FieldCacheTermsFilter.cs -crlf
+/FieldComparator.cs -crlf
+/FieldComparatorSource.cs -crlf
+/FieldDoc.cs -crlf
+/FieldDocSortedHitQueue.cs -crlf
+/FieldSortedHitQueue.cs -crlf
+/FieldValueHitQueue.cs -crlf
+/Filter.cs -crlf
+/FilterManager.cs -crlf
+/FilteredDocIdSet.cs -crlf
+/FilteredDocIdSetIterator.cs -crlf
+/FilteredQuery.cs -crlf
+/FilteredTermEnum.cs -crlf
+/Function -crlf
+/FuzzyQuery.cs -crlf
+/FuzzyTermEnum.cs -crlf
+/Hit.cs -crlf
+/HitCollector.cs -crlf
+/HitCollectorWrapper.cs -crlf
+/HitIterator.cs -crlf
+/HitQueue.cs -crlf
+/Hits.cs -crlf
+/IndexSearcher.cs -crlf
+/MatchAllDocsQuery.cs -crlf
+/MultiPhraseQuery.cs -crlf
+/MultiSearcher.cs -crlf
+/MultiTermQuery.cs -crlf
+/MultiTermQueryWrapperFilter.cs -crlf
+/NumericRangeFilter.cs -crlf
+/NumericRangeQuery.cs -crlf
+/Package.html -crlf
+/ParallelMultiSearcher.cs -crlf
+/Payloads -crlf
+/PhrasePositions.cs -crlf
+/PhraseQuery.cs -crlf
+/PhraseQueue.cs -crlf
+/PhraseScorer.cs -crlf
+/PositiveScoresOnlyCollector.cs -crlf
+/PrefixFilter.cs -crlf
+/PrefixQuery.cs -crlf
+/PrefixTermEnum.cs -crlf
+/Query.cs -crlf
+/QueryFilter.cs -crlf
+/QueryTermVector.cs -crlf
+/QueryWrapperFilter.cs -crlf
+/RangeFilter.cs -crlf
+/RangeQuery.cs -crlf
+/ReqExclScorer.cs -crlf
+/ReqOptSumScorer.cs -crlf
+/ScoreCachingWrappingScorer.cs -crlf
+/ScoreDoc.cs -crlf
+/ScoreDocComparator.cs -crlf
+/Scorer.cs -crlf
+/Searchable.cs -crlf
+/Searcher.cs -crlf
+/Similarity.cs -crlf
+/SimilarityDelegator.cs -crlf
+/SloppyPhraseScorer.cs -crlf
+/Sort.cs -crlf
+/SortComparator.cs -crlf
+/SortComparatorSource.cs -crlf
+/SortField.cs -crlf
+/SpanFilter.cs -crlf
+/SpanFilterResult.cs -crlf
+/SpanQueryFilter.cs -crlf
+/Spans -crlf
+/TermQuery.cs -crlf
+/TermRangeFilter.cs -crlf
+/TermRangeQuery.cs -crlf
+/TermRangeTermEnum.cs -crlf
+/TermScorer.cs -crlf
+/TimeLimitedCollector.cs -crlf
+/TimeLimitingCollector.cs -crlf
+/TopDocCollector.cs -crlf
+/TopDocs.cs -crlf
+/TopDocsCollector.cs -crlf
+/TopFieldCollector.cs -crlf
+/TopFieldDocCollector.cs -crlf
+/TopFieldDocs.cs -crlf
+/TopScoreDocCollector.cs -crlf
+/Weight.cs -crlf
+/WildcardQuery.cs -crlf
+/WildcardTermEnum.cs -crlf
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/BooleanClause.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/BooleanClause.cs
new file mode 100644 (file)
index 0000000..50cb4c4
--- /dev/null
@@ -0,0 +1,131 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Parameter = Mono.Lucene.Net.Util.Parameter;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>A clause in a BooleanQuery. </summary>
+       [Serializable]
+       public class BooleanClause
+       {
+               
+               /// <summary>Specifies how clauses are to occur in matching documents. </summary>
+               [Serializable]
+               public sealed class Occur:Parameter
+               {
+                       
+                       internal Occur(System.String name):base(name)
+                       {
+                       }
+                       
+                       public override System.String ToString()
+                       {
+                               if (this == MUST)
+                                       return "+";
+                               if (this == MUST_NOT)
+                                       return "-";
+                               return "";
+                       }
+                       
+                       /// <summary>Use this operator for clauses that <i>must</i> appear in the matching documents. </summary>
+                       public static readonly Occur MUST = new Occur("MUST");
+                       /// <summary>Use this operator for clauses that <i>should</i> appear in the 
+                       /// matching documents. For a BooleanQuery with no <code>MUST</code> 
+                       /// clauses one or more <code>SHOULD</code> clauses must match a document 
+                       /// for the BooleanQuery to match.
+                       /// </summary>
+                       /// <seealso cref="BooleanQuery.SetMinimumNumberShouldMatch">
+                       /// </seealso>
+                       public static readonly Occur SHOULD = new Occur("SHOULD");
+                       /// <summary>Use this operator for clauses that <i>must not</i> appear in the matching documents.
+                       /// Note that it is not possible to search for queries that only consist
+                       /// of a <code>MUST_NOT</code> clause. 
+                       /// </summary>
+                       public static readonly Occur MUST_NOT = new Occur("MUST_NOT");
+               }
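+
+               // A usage sketch (TermQuery/Term as elsewhere in this port): Occur drives
+               // how BooleanQuery combines clauses, e.g.
+               //
+               //   var q = new BooleanQuery();
+               //   q.Add(new TermQuery(new Term("body", "foo")), Occur.MUST);
+               //   q.Add(new TermQuery(new Term("body", "bar")), Occur.SHOULD);
+               //   q.Add(new TermQuery(new Term("body", "baz")), Occur.MUST_NOT);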
+               
+               /// <summary>The query whose matching documents are combined by the boolean query.</summary>
+               private Query query;
+               
+               private Occur occur;
+               
+               
+               /// <summary>Constructs a BooleanClause.</summary>
+               public BooleanClause(Query query, Occur occur)
+               {
+                       this.query = query;
+                       this.occur = occur;
+               }
+               
+               public virtual Occur GetOccur()
+               {
+                       return occur;
+               }
+               
+               public virtual void  SetOccur(Occur occur)
+               {
+                       this.occur = occur;
+               }
+               
+               public virtual Query GetQuery()
+               {
+                       return query;
+               }
+               
+               public virtual void  SetQuery(Query query)
+               {
+                       this.query = query;
+               }
+               
+               public virtual bool IsProhibited()
+               {
+                       return Occur.MUST_NOT.Equals(occur);
+               }
+               
+               public virtual bool IsRequired()
+               {
+                       return Occur.MUST.Equals(occur);
+               }
+               
+               
+               
+               /// <summary>Returns true if <code>o</code> is equal to this. </summary>
+               public  override bool Equals(System.Object o)
+               {
+                       if (o == null || !(o is BooleanClause))
+                               return false;
+                       BooleanClause other = (BooleanClause) o;
+                       return this.query.Equals(other.query) && this.occur.Equals(other.occur);
+               }
+               
+               /// <summary>Returns a hash code value for this object.</summary>
+               public override int GetHashCode()
+               {
+                       return query.GetHashCode() ^ (Occur.MUST.Equals(occur)?1:0) ^ (Occur.MUST_NOT.Equals(occur)?2:0);
+               }
+               
+               
+               public override System.String ToString()
+               {
+                       return occur.ToString() + query.ToString();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/BooleanQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/BooleanQuery.cs
new file mode 100644 (file)
index 0000000..481237a
--- /dev/null
@@ -0,0 +1,684 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+using Occur = Mono.Lucene.Net.Search.BooleanClause.Occur;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>A Query that matches documents matching boolean combinations of other
+       /// queries, e.g. {@link TermQuery}s, {@link PhraseQuery}s or other
+       /// BooleanQuerys.
+       /// </summary>
+       [Serializable]
+       public class BooleanQuery:Query, System.ICloneable
+       {
+               [Serializable]
+               private class AnonymousClassSimilarityDelegator:SimilarityDelegator
+               {
+                       private void  InitBlock(BooleanQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private BooleanQuery enclosingInstance;
+                       public BooleanQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal AnonymousClassSimilarityDelegator(BooleanQuery enclosingInstance, Mono.Lucene.Net.Search.Similarity Param1):base(Param1)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       public override float Coord(int overlap, int maxOverlap)
+                       {
+                               return 1.0f;
+                       }
+               }
+               
+               private static int maxClauseCount = 1024;
+               
+               /// <summary>Thrown when an attempt is made to add more than {@link
+               /// #GetMaxClauseCount()} clauses. This typically happens if
+               /// a PrefixQuery, FuzzyQuery, WildcardQuery, or TermRangeQuery 
+               /// is expanded to many terms during search. 
+               /// </summary>
+               [Serializable]
+               public class TooManyClauses:System.SystemException
+               {
+                       public override System.String Message
+                       {
+                               get
+                               {
+                                       return "maxClauseCount is set to " + Mono.Lucene.Net.Search.BooleanQuery.maxClauseCount;
+                               }
+                               
+                       }
+                       public TooManyClauses()
+                       {
+                       }
+               }
+               
+               /// <summary>Return the maximum number of clauses permitted, 1024 by default.
+               /// Attempts to add more than the permitted number of clauses cause {@link
+               /// TooManyClauses} to be thrown.
+               /// </summary>
+               /// <seealso cref="SetMaxClauseCount(int)">
+               /// </seealso>
+               public static int GetMaxClauseCount()
+               {
+                       return maxClauseCount;
+               }
+               
+               /// <summary> Set the maximum number of clauses permitted per BooleanQuery.
+               /// Default value is 1024.
+               /// </summary>
+               public static void  SetMaxClauseCount(int maxClauseCount)
+               {
+                       if (maxClauseCount < 1)
+                               throw new System.ArgumentException("maxClauseCount must be >= 1");
+                       BooleanQuery.maxClauseCount = maxClauseCount;
+               }
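+
+               // A hedged sketch: code that expands very large wildcard/prefix queries
+               // typically raises the limit up front instead of catching TooManyClauses:
+               //
+               //   if (BooleanQuery.GetMaxClauseCount() < 4096)
+               //       BooleanQuery.SetMaxClauseCount(4096);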
+               
+               private SupportClass.EquatableList<BooleanClause> clauses = new SupportClass.EquatableList<BooleanClause>();
+               private bool disableCoord;
+               
+               /// <summary>Constructs an empty boolean query. </summary>
+               public BooleanQuery()
+               {
+               }
+               
+               /// <summary>Constructs an empty boolean query.
+               /// 
+               /// {@link Similarity#Coord(int,int)} may be disabled in scoring, as
+               /// appropriate. For example, this score factor does not make sense for most
+               /// automatically generated queries, like {@link WildcardQuery} and {@link
+               /// FuzzyQuery}.
+               /// 
+               /// </summary>
+               /// <param name="disableCoord">disables {@link Similarity#Coord(int,int)} in scoring.
+               /// </param>
+               public BooleanQuery(bool disableCoord)
+               {
+                       this.disableCoord = disableCoord;
+               }
+               
+               /// <summary>Returns true iff {@link Similarity#Coord(int,int)} is disabled in
+               /// scoring for this query instance.
+               /// </summary>
+               /// <seealso cref="BooleanQuery(bool)">
+               /// </seealso>
+               public virtual bool IsCoordDisabled()
+               {
+                       return disableCoord;
+               }
+               
+               // Implement coord disabling.
+               // Inherit javadoc.
+               public override Similarity GetSimilarity(Searcher searcher)
+               {
+                       Similarity result = base.GetSimilarity(searcher);
+                       if (disableCoord)
+                       {
+                               // disable coord as requested
+                               result = new AnonymousClassSimilarityDelegator(this, result);
+                       }
+                       return result;
+               }
+               
+               /// <summary> Specifies a minimum number of the optional BooleanClauses
+               /// which must be satisfied.
+               /// 
+               /// <p/>
+               /// By default no optional clauses are necessary for a match
+               /// (unless there are no required clauses).  If this method is used,
+               /// then the specified number of clauses is required.
+               /// <p/>
+               /// <p/>
+               /// Use of this method is totally independent of specifying that
+               /// any specific clauses are required (or prohibited).  This number will
+               /// only be compared against the number of matching optional clauses.
+               /// <p/>
+               /// <p/>
+               /// EXPERT NOTE: Using this method may force collecting docs in order,
+               /// regardless of whether SetAllowDocsOutOfOrder(true) has been called.
+               /// <p/>
+               /// 
+               /// </summary>
+               /// <param name="min">the number of optional clauses that must match
+               /// </param>
+               /// <seealso cref="SetAllowDocsOutOfOrder">
+               /// </seealso>
+               public virtual void  SetMinimumNumberShouldMatch(int min)
+               {
+                       this.minNrShouldMatch = min;
+               }
+               protected internal int minNrShouldMatch = 0;
+               
+               /// <summary> Gets the minimum number of the optional BooleanClauses
+               /// which must be satisfied.
+               /// </summary>
+               public virtual int GetMinimumNumberShouldMatch()
+               {
+                       return minNrShouldMatch;
+               }
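+
+               // Example of the contract above: with three SHOULD clauses and
+               // SetMinimumNumberShouldMatch(2), a document matches only when at least
+               // two of those optional clauses match it (required clauses still apply).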
+               
+               /// <summary>Adds a clause to a boolean query.
+               /// 
+               /// </summary>
+               /// <throws>  TooManyClauses if the new number of clauses exceeds the maximum clause number </throws>
+               /// <seealso cref="GetMaxClauseCount()">
+               /// </seealso>
+               public virtual void  Add(Query query, BooleanClause.Occur occur)
+               {
+                       Add(new BooleanClause(query, occur));
+               }
+               
+               /// <summary>Adds a clause to a boolean query.</summary>
+               /// <throws>  TooManyClauses if the new number of clauses exceeds the maximum clause number </throws>
+               /// <seealso cref="GetMaxClauseCount()">
+               /// </seealso>
+               public virtual void  Add(BooleanClause clause)
+               {
+                       if (clauses.Count >= maxClauseCount)
+                               throw new TooManyClauses();
+                       
+                       clauses.Add(clause);
+               }
+               
+               /// <summary>Returns the set of clauses in this query. </summary>
+               public virtual BooleanClause[] GetClauses()
+               {
+                       return (BooleanClause[]) clauses.ToArray();
+               }
+               
+               /// <summary>Returns the list of clauses in this query. </summary>
+               public virtual System.Collections.IList Clauses()
+               {
+                       return clauses;
+               }
+               
+               /// <summary> Expert: the Weight for BooleanQuery, used to
+               /// normalize, score and explain these queries.
+               /// 
+               /// <p/>NOTE: this API and implementation is subject to
+               /// change suddenly in the next release.<p/>
+               /// </summary>
+               [Serializable]
+               protected internal class BooleanWeight:Weight
+               {
+                       private void  InitBlock(BooleanQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private BooleanQuery enclosingInstance;
+                       public BooleanQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       /// <summary>The Similarity implementation. </summary>
+                       protected internal Similarity similarity;
+                       protected internal System.Collections.ArrayList weights;
+                       
+                       public BooleanWeight(BooleanQuery enclosingInstance, Searcher searcher)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.similarity = Enclosing_Instance.GetSimilarity(searcher);
+                               weights = new System.Collections.ArrayList(Enclosing_Instance.clauses.Count);
+                               for (int i = 0; i < Enclosing_Instance.clauses.Count; i++)
+                               {
+                                       BooleanClause c = (BooleanClause) Enclosing_Instance.clauses[i];
+                                       weights.Add(c.GetQuery().CreateWeight(searcher));
+                               }
+                       }
+                       
+                       public override Query GetQuery()
+                       {
+                               return Enclosing_Instance;
+                       }
+                       public override float GetValue()
+                       {
+                               return Enclosing_Instance.GetBoost();
+                       }
+                       
+                       public override float SumOfSquaredWeights()
+                       {
+                               float sum = 0.0f;
+                               for (int i = 0; i < weights.Count; i++)
+                               {
+                                       BooleanClause c = (BooleanClause) Enclosing_Instance.clauses[i];
+                                       Weight w = (Weight) weights[i];
+                                       // call sumOfSquaredWeights for all clauses in case of side effects
+                                       float s = w.SumOfSquaredWeights(); // sum sub weights
+                                       if (!c.IsProhibited())
+                                       // only add to sum for non-prohibited clauses
+                                               sum += s;
+                               }
+                               
+                               sum *= Enclosing_Instance.GetBoost() * Enclosing_Instance.GetBoost(); // boost each sub-weight
+                               
+                               return sum;
+                       }
+                       
+                       
+                       public override void  Normalize(float norm)
+                       {
+                               norm *= Enclosing_Instance.GetBoost(); // incorporate boost
+                               for (System.Collections.IEnumerator iter = weights.GetEnumerator(); iter.MoveNext(); )
+                               {
+                                       Weight w = (Weight) iter.Current;
+                                       // normalize all clauses (even if prohibited, in case of side effects)
+                                       w.Normalize(norm);
+                               }
+                       }
+                       
+                       public override Explanation Explain(IndexReader reader, int doc)
+                       {
+                               int minShouldMatch = Enclosing_Instance.GetMinimumNumberShouldMatch();
+                               ComplexExplanation sumExpl = new ComplexExplanation();
+                               sumExpl.SetDescription("sum of:");
+                               int coord = 0;
+                               int maxCoord = 0;
+                               float sum = 0.0f;
+                               bool fail = false;
+                               int shouldMatchCount = 0;
+                               for (System.Collections.IEnumerator wIter = weights.GetEnumerator(), cIter = Enclosing_Instance.clauses.GetEnumerator(); wIter.MoveNext(); )
+                               {
+                    cIter.MoveNext();
+
+                    Weight w = (Weight)wIter.Current;
+                                       BooleanClause c = (BooleanClause) cIter.Current;
+                                       if (w.Scorer(reader, true, true) == null)
+                                       {
+                                               continue;
+                                       }
+                                       Explanation e = w.Explain(reader, doc);
+                                       if (!c.IsProhibited())
+                                               maxCoord++;
+                                       if (e.IsMatch())
+                                       {
+                                               if (!c.IsProhibited())
+                                               {
+                                                       sumExpl.AddDetail(e);
+                                                       sum += e.GetValue();
+                                                       coord++;
+                                               }
+                                               else
+                                               {
+                                                       Explanation r = new Explanation(0.0f, "match on prohibited clause (" + c.GetQuery().ToString() + ")");
+                                                       r.AddDetail(e);
+                                                       sumExpl.AddDetail(r);
+                                                       fail = true;
+                                               }
+                                               if (c.GetOccur() == Occur.SHOULD)
+                                                       shouldMatchCount++;
+                                       }
+                                       else if (c.IsRequired())
+                                       {
+                                               Explanation r = new Explanation(0.0f, "no match on required clause (" + c.GetQuery().ToString() + ")");
+                                               r.AddDetail(e);
+                                               sumExpl.AddDetail(r);
+                                               fail = true;
+                                       }
+                               }
+                               if (fail)
+                               {
+                                       sumExpl.SetMatch(false);
+                                       sumExpl.SetValue(0.0f);
+                                       sumExpl.SetDescription("Failure to meet condition(s) of required/prohibited clause(s)");
+                                       return sumExpl;
+                               }
+                               else if (shouldMatchCount < minShouldMatch)
+                               {
+                                       sumExpl.SetMatch(false);
+                                       sumExpl.SetValue(0.0f);
+                                       sumExpl.SetDescription("Failure to match minimum number " + "of optional clauses: " + minShouldMatch);
+                                       return sumExpl;
+                               }
+                               
+                               sumExpl.SetMatch(0 < coord);
+                               sumExpl.SetValue(sum);
+                               
+                               float coordFactor = similarity.Coord(coord, maxCoord);
+                               if (coordFactor == 1.0f)
+                               // coord is no-op
+                                       return sumExpl;
+                               // eliminate wrapper
+                               else
+                               {
+                                       ComplexExplanation result = new ComplexExplanation(sumExpl.IsMatch(), sum * coordFactor, "product of:");
+                                       result.AddDetail(sumExpl);
+                                       result.AddDetail(new Explanation(coordFactor, "coord(" + coord + "/" + maxCoord + ")"));
+                                       return result;
+                               }
+                       }
+                       
+                       public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
+                       {
+                               System.Collections.IList required = new System.Collections.ArrayList();
+                               System.Collections.IList prohibited = new System.Collections.ArrayList();
+                               System.Collections.IList optional = new System.Collections.ArrayList();
+                               for (System.Collections.IEnumerator wIter = weights.GetEnumerator(), cIter = Enclosing_Instance.clauses.GetEnumerator(); wIter.MoveNext(); )
+                               {
+                    cIter.MoveNext();
+
+                                       Weight w = (Weight) wIter.Current;
+                                       BooleanClause c = (BooleanClause) cIter.Current;
+                                       Scorer subScorer = w.Scorer(reader, true, false);
+                                       if (subScorer == null)
+                                       {
+                                               if (c.IsRequired())
+                                               {
+                                                       return null;
+                                               }
+                                       }
+                                       else if (c.IsRequired())
+                                       {
+                                               required.Add(subScorer);
+                                       }
+                                       else if (c.IsProhibited())
+                                       {
+                                               prohibited.Add(subScorer);
+                                       }
+                                       else
+                                       {
+                                               optional.Add(subScorer);
+                                       }
+                               }
+                               
+                               // Check if we can return a BooleanScorer
+                               scoreDocsInOrder |= !Mono.Lucene.Net.Search.BooleanQuery.allowDocsOutOfOrder; // until it is removed, factor in the static setting.
+                               if (!scoreDocsInOrder && topScorer && required.Count == 0 && prohibited.Count < 32)
+                               {
+                                       return new BooleanScorer(similarity, Enclosing_Instance.minNrShouldMatch, optional, prohibited);
+                               }
+                               
+                               if (required.Count == 0 && optional.Count == 0)
+                               {
+                                       // no required and optional clauses.
+                                       return null;
+                               }
+                               else if (optional.Count < Enclosing_Instance.minNrShouldMatch)
+                               {
+                                       // either >1 req scorer, or there are 0 req scorers and at least 1
+                                       // optional scorer. Therefore if there are not enough optional scorers
+                                       // no documents will be matched by the query
+                                       return null;
+                               }
+                               
+                               // Return a BooleanScorer2
+                               return new BooleanScorer2(similarity, Enclosing_Instance.minNrShouldMatch, required, prohibited, optional);
+                       }
+                       
+                       public override bool ScoresDocsOutOfOrder()
+                       {
+                               int numProhibited = 0;
+                               for (System.Collections.IEnumerator cIter = Enclosing_Instance.clauses.GetEnumerator(); cIter.MoveNext(); )
+                               {
+                                       BooleanClause c = (BooleanClause) cIter.Current;
+                                       if (c.IsRequired())
+                                       {
+                                               return false; // BS2 (in-order) will be used by scorer()
+                                       }
+                                       else if (c.IsProhibited())
+                                       {
+                                               ++numProhibited;
+                                       }
+                               }
+                               
+                               if (numProhibited > 32)
+                               {
+                                       // cannot use BS
+                                       return false;
+                               }
+                               
+                               // scorer() will return an out-of-order scorer if requested.
+                               return true;
+                       }
+               }
+               
+               /// <summary> Whether hit docs may be collected out of docid order.
+               /// 
+               /// </summary>
+               /// <deprecated> this will not be needed anymore, as
+               /// {@link Weight#ScoresDocsOutOfOrder()} is used.
+               /// </deprecated>
+        [Obsolete("this will not be needed anymore, as Weight.ScoresDocsOutOfOrder() is used.")]
+               private static bool allowDocsOutOfOrder = true;
+               
+               /// <summary> Expert: Indicates whether hit docs may be collected out of docid order.
+               /// 
+               /// <p/>
+               /// Background: although the contract of the Scorer class requires that
+               /// documents be iterated in order of doc id, this was not true in early
+		/// versions of Lucene. Many pieces of functionality in the current Lucene
+		/// code base have undefined behavior if this contract is not upheld, but
+		/// out-of-order collection can be faster in some specific simple cases (for
+		/// example, disjunction queries with fewer than 32 prohibited clauses; this
+		/// setting has no effect on other queries).
+		/// <p/>
+               /// Specifics: By setting this option to true, docid N might be scored for a
+               /// single segment before docid N-1. Across multiple segments, docs may be
+               /// scored out of order regardless of this setting - it only applies to scoring
+               /// a single segment.
+               /// 
+               /// Being static, this setting is system wide.
+               /// <p/>
+               /// 
+               /// </summary>
+               /// <deprecated> this is not needed anymore, as
+               /// {@link Weight#ScoresDocsOutOfOrder()} is used.
+               /// </deprecated>
+        [Obsolete("this is not needed anymore, as Weight.ScoresDocsOutOfOrder() is used.")]
+               public static void  SetAllowDocsOutOfOrder(bool allow)
+               {
+                       allowDocsOutOfOrder = allow;
+               }
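+		
+		// Hedged usage sketch for this deprecated, process-wide switch (the call
+		// site is hypothetical):
+		//
+		//   BooleanQuery.SetAllowDocsOutOfOrder(true);   // permit BooleanScorer
+		//   bool allowed = BooleanQuery.GetAllowDocsOutOfOrder();
+		//
+		// Weight.ScoresDocsOutOfOrder() supersedes this flag.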
+               
+               /// <summary> Whether hit docs may be collected out of docid order.
+               /// 
+               /// </summary>
+		/// <seealso cref="SetAllowDocsOutOfOrder(bool)">
+               /// </seealso>
+               /// <deprecated> this is not needed anymore, as
+               /// {@link Weight#ScoresDocsOutOfOrder()} is used.
+               /// </deprecated>
+        [Obsolete("this is not needed anymore, as Weight.ScoresDocsOutOfOrder() is used.")]
+               public static bool GetAllowDocsOutOfOrder()
+               {
+                       return allowDocsOutOfOrder;
+               }
+               
+		/// <deprecated> Use {@link #SetAllowDocsOutOfOrder(bool)} instead. 
+               /// </deprecated>
+        [Obsolete("Use SetAllowDocsOutOfOrder(bool) instead.")]
+               public static void  SetUseScorer14(bool use14)
+               {
+                       SetAllowDocsOutOfOrder(use14);
+               }
+               
+               /// <deprecated> Use {@link #GetAllowDocsOutOfOrder()} instead.
+               /// </deprecated>
+        [Obsolete("Use GetAllowDocsOutOfOrder() instead.")]
+               public static bool GetUseScorer14()
+               {
+                       return GetAllowDocsOutOfOrder();
+               }
+               
+               public override Weight CreateWeight(Searcher searcher)
+               {
+                       return new BooleanWeight(this, searcher);
+               }
+               
+               public override Query Rewrite(IndexReader reader)
+               {
+                       if (minNrShouldMatch == 0 && clauses.Count == 1)
+                       {
+                               // optimize 1-clause queries
+                               BooleanClause c = (BooleanClause) clauses[0];
+                               if (!c.IsProhibited())
+                               {
+                                       // just return clause
+                                       
+                                       Query query = c.GetQuery().Rewrite(reader); // rewrite first
+                                       
+                                       if (GetBoost() != 1.0f)
+                                       {
+                                               // incorporate boost
+                                               if (query == c.GetQuery())
+                                               // if rewrite was no-op
+                                                       query = (Query) query.Clone(); // then clone before boost
+                                               query.SetBoost(GetBoost() * query.GetBoost());
+                                       }
+                                       
+                                       return query;
+                               }
+                       }
+                       
+                       BooleanQuery clone = null; // recursively rewrite
+                       for (int i = 0; i < clauses.Count; i++)
+                       {
+                               BooleanClause c = (BooleanClause) clauses[i];
+                               Query query = c.GetQuery().Rewrite(reader);
+                               if (query != c.GetQuery())
+                               {
+                                       // clause rewrote: must clone
+                                       if (clone == null)
+                                               clone = (BooleanQuery) this.Clone();
+                                       clone.clauses[i] = new BooleanClause(query, c.GetOccur());
+                               }
+                       }
+                       if (clone != null)
+                       {
+                               return clone; // some clauses rewrote
+                       }
+                       else
+                               return this; // no clauses rewrote
+               }
+               
+               // inherit javadoc
+               public override void  ExtractTerms(System.Collections.Hashtable terms)
+               {
+                       for (System.Collections.IEnumerator i = clauses.GetEnumerator(); i.MoveNext(); )
+                       {
+                               BooleanClause clause = (BooleanClause) i.Current;
+                               clause.GetQuery().ExtractTerms(terms);
+                       }
+               }
+               
+               public override System.Object Clone()
+               {
+                       BooleanQuery clone = (BooleanQuery) base.Clone();
+                       clone.clauses = (SupportClass.EquatableList<BooleanClause>) this.clauses.Clone();
+                       return clone;
+               }
+               
+               /// <summary>Prints a user-readable version of this query. </summary>
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       bool needParens = (GetBoost() != 1.0) || (GetMinimumNumberShouldMatch() > 0);
+                       if (needParens)
+                       {
+                               buffer.Append("(");
+                       }
+                       
+                       for (int i = 0; i < clauses.Count; i++)
+                       {
+                               BooleanClause c = (BooleanClause) clauses[i];
+                               if (c.IsProhibited())
+                                       buffer.Append("-");
+                               else if (c.IsRequired())
+                                       buffer.Append("+");
+                               
+                               Query subQuery = c.GetQuery();
+                               if (subQuery != null)
+                               {
+                                       if (subQuery is BooleanQuery)
+                                       {
+                                               // wrap sub-bools in parens
+                                               buffer.Append("(");
+                                               buffer.Append(subQuery.ToString(field));
+                                               buffer.Append(")");
+                                       }
+                                       else
+                                       {
+                                               buffer.Append(subQuery.ToString(field));
+                                       }
+                               }
+                               else
+                               {
+                                       buffer.Append("null");
+                               }
+                               
+                               if (i != clauses.Count - 1)
+                                       buffer.Append(" ");
+                       }
+                       
+                       if (needParens)
+                       {
+                               buffer.Append(")");
+                       }
+                       
+                       if (GetMinimumNumberShouldMatch() > 0)
+                       {
+                               buffer.Append('~');
+                               buffer.Append(GetMinimumNumberShouldMatch());
+                       }
+                       
+                       if (GetBoost() != 1.0f)
+                       {
+                               buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       }
+                       
+                       return buffer.ToString();
+               }
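+		
+		// Sketch of the format produced above, with hypothetical clause text: a
+		// query with required "a", prohibited "b", optional "c", a minimum-
+		// should-match of 1 and a boost of 2 renders as "(+a -b c)~1^2.0",
+		// where the trailing "^2.0" comes from ToStringUtils.Boost.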
+               
+               /// <summary>Returns true iff <code>o</code> is equal to this. </summary>
+               public  override bool Equals(System.Object o)
+               {
+            if (!(o is BooleanQuery))
+                return false;
+            BooleanQuery other = (BooleanQuery)o;
+            return (this.GetBoost() == other.GetBoost())
+                    && this.clauses.Equals(other.clauses)
+                    && this.GetMinimumNumberShouldMatch() == other.GetMinimumNumberShouldMatch()
+                    && this.disableCoord == other.disableCoord;
+               }
+               
+               /// <summary>Returns a hash code value for this object.</summary>
+               public override int GetHashCode()
+               {
+            return BitConverter.ToInt32(BitConverter.GetBytes(GetBoost()), 0) ^ clauses.GetHashCode() + GetMinimumNumberShouldMatch() + (disableCoord ? 17 : 0);
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/BooleanScorer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/BooleanScorer.cs
new file mode 100644 (file)
index 0000000..1102fcc
--- /dev/null
@@ -0,0 +1,479 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /* Description from Doug Cutting (excerpted from
+       * LUCENE-1483):
+       *
+       * BooleanScorer uses a ~16k array to score windows of
+	* docs. So it scores docs 0-16k first, then docs 16k-32k,
+       * etc. For each window it iterates through all query terms
+       * and accumulates a score in table[doc%16k]. It also stores
+       * in the table a bitmask representing which terms
+       * contributed to the score. Non-zero scores are chained in
+       * a linked list. At the end of scoring each window it then
+       * iterates through the linked list and, if the bitmask
+       * matches the boolean constraints, collects a hit. For
+       * boolean queries with lots of frequent terms this can be
+       * much faster, since it does not need to update a priority
+       * queue for each posting, instead performing constant-time
+       * operations per posting. The only downside is that it
+       * results in hits being delivered out-of-order within the
+       * window, which means it cannot be nested within other
+       * scorers. But it works well as a top-level scorer.
+       *
+       * The new BooleanScorer2 implementation instead works by
+       * merging priority queues of postings, albeit with some
+       * clever tricks. For example, a pure conjunction (all terms
+       * required) does not require a priority queue. Instead it
+       * sorts the posting streams at the start, then repeatedly
+	* skips the first to the last. If the first ever equals
+       * the last, then there's a hit. When some terms are
+       * required and some terms are optional, the conjunction can
+       * be evaluated first, then the optional terms can all skip
+       * to the match and be added to the score. Thus the
+       * conjunction can reduce the number of priority queue
+       * updates for the optional terms. */
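+	
+	/* A worked example of the windowed scoring described above (note that in
+	* this port the window is BucketTable.SIZE = 1 << 11 = 2048 docs rather
+	* than the ~16k of the excerpt): doc 5000 lies in the third window
+	* [4096, 6144) and accumulates into buckets[5000 & 2047] == buckets[904].
+	* The bucket is pushed onto the 'first' list when a sub-scorer touches it,
+	* and is emitted as a hit at the end of the window only if its bitmask
+	* clears prohibitedMask, covers requiredMask, and its coord reaches
+	* minNrShouldMatch. */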
+       
+       public sealed class BooleanScorer:Scorer
+       {
+               private void  InitBlock()
+               {
+                       bucketTable = new BucketTable();
+               }
+               
+               private sealed class BooleanScorerCollector:Collector
+               {
+                       private BucketTable bucketTable;
+                       private int mask;
+                       private Scorer scorer;
+                       
+                       public BooleanScorerCollector(int mask, BucketTable bucketTable)
+                       {
+                               this.mask = mask;
+                               this.bucketTable = bucketTable;
+                       }
+                       public override void  Collect(int doc)
+                       {
+                               BucketTable table = bucketTable;
+                               int i = doc & Mono.Lucene.Net.Search.BooleanScorer.BucketTable.MASK;
+                               Bucket bucket = table.buckets[i];
+                               if (bucket == null)
+                                       table.buckets[i] = bucket = new Bucket();
+                               
+                               if (bucket.doc != doc)
+                               {
+                                       // invalid bucket
+                                       bucket.doc = doc; // set doc
+                                       bucket.score = scorer.Score(); // initialize score
+                                       bucket.bits = mask; // initialize mask
+                                       bucket.coord = 1; // initialize coord
+                                       
+                                       bucket.next = table.first; // push onto valid list
+                                       table.first = bucket;
+                               }
+                               else
+                               {
+                                       // valid bucket
+                                       bucket.score += scorer.Score(); // increment score
+                                       bucket.bits |= mask; // add bits in mask
+                                       bucket.coord++; // increment coord
+                               }
+                       }
+                       
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               // not needed by this implementation
+                       }
+                       
+                       public override void  SetScorer(Scorer scorer)
+                       {
+                               this.scorer = scorer;
+                       }
+                       
+                       public override bool AcceptsDocsOutOfOrder()
+                       {
+                               return true;
+                       }
+               }
+               
+               // An internal class which is used in score(Collector, int) for setting the
+               // current score. This is required since Collector exposes a setScorer method
+               // and implementations that need the score will call scorer.score().
+               // Therefore the only methods that are implemented are score() and doc().
+               private sealed class BucketScorer:Scorer
+               {
+                       
+                       internal float score;
+                       internal int doc = NO_MORE_DOCS;
+                       
+                       public BucketScorer():base(null)
+                       {
+                       }
+                       
+                       public override int Advance(int target)
+                       {
+                               return NO_MORE_DOCS;
+                       }
+                       
+                       /// <deprecated> use {@link #DocID()} instead. 
+                       /// </deprecated>
+            [Obsolete("use DocID() instead.")]
+                       public override int Doc()
+                       {
+                               return doc;
+                       }
+                       
+                       public override int DocID()
+                       {
+                               return doc;
+                       }
+                       
+                       public override Explanation Explain(int doc)
+                       {
+                               return null;
+                       }
+                       
+                       /// <deprecated> use {@link #NextDoc()} instead. 
+                       /// </deprecated>
+            [Obsolete("use NextDoc() instead. ")]
+                       public override bool Next()
+                       {
+                               return false;
+                       }
+                       
+                       public override int NextDoc()
+                       {
+                               return NO_MORE_DOCS;
+                       }
+                       
+                       public override float Score()
+                       {
+                               return score;
+                       }
+                       
+                       /// <deprecated> use {@link #Advance(int)} instead. 
+                       /// </deprecated>
+            [Obsolete("use Advance(int) instead. ")]
+                       public override bool SkipTo(int target)
+                       {
+                               return false;
+                       }
+               }
+               
+               internal sealed class Bucket
+               {
+                       internal int doc = - 1; // tells if bucket is valid
+                       internal float score; // incremental score
+                       internal int bits; // used for bool constraints
+                       internal int coord; // count of terms in score
+                       internal Bucket next; // next valid bucket
+               }
+               
+               /// <summary>A simple hash table of document scores within a range. </summary>
+               internal sealed class BucketTable
+               {
+                       private void  InitBlock()
+                       {
+                               buckets = new Bucket[SIZE];
+                       }
+                       public const int SIZE = 1 << 11;
+                       public static readonly int MASK;
+                       
+                       internal Bucket[] buckets;
+                       internal Bucket first = null; // head of valid list
+                       
+                       public BucketTable()
+                       {
+                InitBlock();
+                       }
+                       
+                       public Collector NewCollector(int mask)
+                       {
+                               return new BooleanScorerCollector(mask, this);
+                       }
+                       
+                       public int Size()
+                       {
+                               return SIZE;
+                       }
+                       static BucketTable()
+                       {
+                               MASK = SIZE - 1;
+                       }
+               }
+               
+               internal sealed class SubScorer
+               {
+                       public Scorer scorer;
+                       public bool required = false;
+                       public bool prohibited = false;
+                       public Collector collector;
+                       public SubScorer next;
+                       
+                       public SubScorer(Scorer scorer, bool required, bool prohibited, Collector collector, SubScorer next)
+                       {
+                               this.scorer = scorer;
+                               this.required = required;
+                               this.prohibited = prohibited;
+                               this.collector = collector;
+                               this.next = next;
+                       }
+               }
+               
+               private SubScorer scorers = null;
+               private BucketTable bucketTable;
+               private int maxCoord = 1;
+               private float[] coordFactors;
+               private int requiredMask = 0;
+               private int prohibitedMask = 0;
+               private int nextMask = 1;
+               private int minNrShouldMatch;
+               private int end;
+               private Bucket current;
+               private int doc = - 1;
+               
+               public /*internal*/ BooleanScorer(Similarity similarity, int minNrShouldMatch, System.Collections.IList optionalScorers, System.Collections.IList prohibitedScorers):base(similarity)
+               {
+                       InitBlock();
+                       this.minNrShouldMatch = minNrShouldMatch;
+                       
+                       if (optionalScorers != null && optionalScorers.Count > 0)
+                       {
+                               for (System.Collections.IEnumerator si = optionalScorers.GetEnumerator(); si.MoveNext(); )
+                               {
+                                       Scorer scorer = (Scorer) si.Current;
+                                       maxCoord++;
+                                       if (scorer.NextDoc() != NO_MORE_DOCS)
+                                       {
+                                               scorers = new SubScorer(scorer, false, false, bucketTable.NewCollector(0), scorers);
+                                       }
+                               }
+                       }
+                       
+                       if (prohibitedScorers != null && prohibitedScorers.Count > 0)
+                       {
+                               for (System.Collections.IEnumerator si = prohibitedScorers.GetEnumerator(); si.MoveNext(); )
+                               {
+                                       Scorer scorer = (Scorer) si.Current;
+                                       int mask = nextMask;
+                                       nextMask = nextMask << 1;
+                                       prohibitedMask |= mask; // update prohibited mask
+                                       if (scorer.NextDoc() != NO_MORE_DOCS)
+                                       {
+                                               scorers = new SubScorer(scorer, false, true, bucketTable.NewCollector(mask), scorers);
+                                       }
+                               }
+                       }
+                       
+                       coordFactors = new float[maxCoord];
+                       Similarity sim = GetSimilarity();
+                       for (int i = 0; i < maxCoord; i++)
+                       {
+                               coordFactors[i] = sim.Coord(i, maxCoord - 1);
+                       }
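+			
+			// For example (a sketch): three optional scorers leave maxCoord at 4,
+			// so coordFactors holds Similarity.Coord(i, 3) for i = 0..3; Score()
+			// later multiplies a bucket's accumulated score by
+			// coordFactors[coord], rewarding docs matched by more sub-scorers.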
+               }
+               
+               // firstDocID is ignored since nextDoc() initializes 'current'
+               public /*protected internal*/ override bool Score(Collector collector, int max, int firstDocID)
+               {
+                       bool more;
+                       Bucket tmp;
+                       BucketScorer bs = new BucketScorer();
+                       // The internal loop will set the score and doc before calling collect.
+                       collector.SetScorer(bs);
+                       do 
+                       {
+                               bucketTable.first = null;
+                               
+                               while (current != null)
+                               {
+                                       // more queued 
+                                       
+                                       // check prohibited & required
+                                       if ((current.bits & prohibitedMask) == 0 && (current.bits & requiredMask) == requiredMask)
+                                       {
+                                               
+                                               if (current.doc >= max)
+                                               {
+                                                       tmp = current;
+                                                       current = current.next;
+                                                       tmp.next = bucketTable.first;
+                                                       bucketTable.first = tmp;
+                                                       continue;
+                                               }
+                                               
+                                               if (current.coord >= minNrShouldMatch)
+                                               {
+                                                       bs.score = current.score * coordFactors[current.coord];
+                                                       bs.doc = current.doc;
+                                                       collector.Collect(current.doc);
+                                               }
+                                       }
+                                       
+                                       current = current.next; // pop the queue
+                               }
+                               
+                               if (bucketTable.first != null)
+                               {
+                                       current = bucketTable.first;
+                                       bucketTable.first = current.next;
+                                       return true;
+                               }
+                               
+                               // refill the queue
+                               more = false;
+                               end += BucketTable.SIZE;
+                               for (SubScorer sub = scorers; sub != null; sub = sub.next)
+                               {
+                                       int subScorerDocID = sub.scorer.DocID();
+                                       if (subScorerDocID != NO_MORE_DOCS)
+                                       {
+                                               more |= sub.scorer.Score(sub.collector, end, subScorerDocID);
+                                       }
+                               }
+                               current = bucketTable.first;
+                       }
+                       while (current != null || more);
+                       
+                       return false;
+               }
+               
+               /// <deprecated> use {@link #Score(Collector, int, int)} instead. 
+               /// </deprecated>
+        [Obsolete("use Score(Collector, int, int) instead.")]
+               protected internal override bool Score(HitCollector hc, int max)
+               {
+                       return Score(new HitCollectorWrapper(hc), max, DocID());
+               }
+               
+               public override int Advance(int target)
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+               /// <deprecated> use {@link #DocID()} instead. 
+               /// </deprecated>
+        [Obsolete("use DocID() instead. ")]
+               public override int Doc()
+               {
+                       return current.doc;
+               }
+               
+               public override int DocID()
+               {
+                       return doc;
+               }
+               
+               public override Explanation Explain(int doc)
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+               /// <deprecated> use {@link #NextDoc()} instead. 
+               /// </deprecated>
+        [Obsolete("use NextDoc() instead. ")]
+               public override bool Next()
+               {
+                       return NextDoc() != NO_MORE_DOCS;
+               }
+               
+               public override int NextDoc()
+               {
+                       bool more;
+                       do 
+                       {
+                               while (bucketTable.first != null)
+                               {
+                                       // more queued
+                                       current = bucketTable.first;
+                                       bucketTable.first = current.next; // pop the queue
+                                       
+                                       // check prohibited & required, and minNrShouldMatch
+                                       if ((current.bits & prohibitedMask) == 0 && (current.bits & requiredMask) == requiredMask && current.coord >= minNrShouldMatch)
+                                       {
+                                               return doc = current.doc;
+                                       }
+                               }
+                               
+                               // refill the queue
+                               more = false;
+                               end += BucketTable.SIZE;
+                               for (SubScorer sub = scorers; sub != null; sub = sub.next)
+                               {
+                                       Scorer scorer = sub.scorer;
+                                       sub.collector.SetScorer(scorer);
+                                       int doc = scorer.DocID();
+                                       while (doc < end)
+                                       {
+                                               sub.collector.Collect(doc);
+                                               doc = scorer.NextDoc();
+                                       }
+                                       more |= (doc != NO_MORE_DOCS);
+                               }
+                       }
+                       while (bucketTable.first != null || more);
+                       
+                       return this.doc = NO_MORE_DOCS;
+               }
+               
+               public override float Score()
+               {
+                       return current.score * coordFactors[current.coord];
+               }
+               
+               public override void  Score(Collector collector)
+               {
+                       Score(collector, System.Int32.MaxValue, NextDoc());
+               }
+               
+               /// <deprecated> use {@link #Score(Collector)} instead. 
+               /// </deprecated>
+        [Obsolete("use Score(Collector) instead. ")]
+               public override void  Score(HitCollector hc)
+               {
+                       Score(new HitCollectorWrapper(hc));
+               }
+               
+               /// <deprecated> use {@link #Advance(int)} instead. 
+               /// </deprecated>
+        [Obsolete("use Advance(int) instead. ")]
+               public override bool SkipTo(int target)
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+               public override System.String ToString()
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       buffer.Append("boolean(");
+                       for (SubScorer sub = scorers; sub != null; sub = sub.next)
+                       {
+                               buffer.Append(sub.scorer.ToString());
+                               buffer.Append(" ");
+                       }
+                       buffer.Append(")");
+                       return buffer.ToString();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/BooleanScorer2.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/BooleanScorer2.cs
new file mode 100644 (file)
index 0000000..8ad17e9
--- /dev/null
@@ -0,0 +1,493 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+	/* See the description in BooleanScorer.cs, comparing
+       * BooleanScorer & BooleanScorer2 */
+       
+       /// <summary>An alternative to BooleanScorer that also allows a minimum number
+       /// of optional scorers that should match.
+	/// <br/>Implements skipTo(), and has no limitations on the number of added scorers.
+       /// <br/>Uses ConjunctionScorer, DisjunctionScorer, ReqOptScorer and ReqExclScorer.
+       /// </summary>
+       class BooleanScorer2:Scorer
+       {
+               private class AnonymousClassDisjunctionSumScorer:DisjunctionSumScorer
+               {
+                       private void  InitBlock(BooleanScorer2 enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private BooleanScorer2 enclosingInstance;
+                       public BooleanScorer2 Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal AnonymousClassDisjunctionSumScorer(BooleanScorer2 enclosingInstance, System.Collections.IList Param1, int Param2):base(Param1, Param2)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private int lastScoredDoc = - 1;
+                       // Save the score of lastScoredDoc, so that we don't compute it more than
+                       // once in score().
+                       private float lastDocScore = System.Single.NaN;
+                       public override float Score()
+                       {
+                               int doc = DocID();
+                               if (doc >= lastScoredDoc)
+                               {
+                                       if (doc > lastScoredDoc)
+                                       {
+                                               lastDocScore = base.Score();
+                                               lastScoredDoc = doc;
+                                       }
+                                       Enclosing_Instance.coordinator.nrMatchers += base.nrMatchers;
+                               }
+                               return lastDocScore;
+                       }
+               }
+               private class AnonymousClassConjunctionScorer:ConjunctionScorer
+               {
+                       private void  InitBlock(int requiredNrMatchers, BooleanScorer2 enclosingInstance)
+                       {
+                               this.requiredNrMatchers = requiredNrMatchers;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private int requiredNrMatchers;
+                       private BooleanScorer2 enclosingInstance;
+                       public BooleanScorer2 Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal AnonymousClassConjunctionScorer(int requiredNrMatchers, BooleanScorer2 enclosingInstance, Mono.Lucene.Net.Search.Similarity Param1, System.Collections.ICollection Param2):base(Param1, Param2)
+                       {
+                               InitBlock(requiredNrMatchers, enclosingInstance);
+                       }
+                       private int lastScoredDoc = - 1;
+                       // Save the score of lastScoredDoc, so that we don't compute it more than
+                       // once in score().
+                       private float lastDocScore = System.Single.NaN;
+                       public override float Score()
+                       {
+                               int doc = DocID();
+                               if (doc >= lastScoredDoc)
+                               {
+                                       if (doc > lastScoredDoc)
+                                       {
+                                               lastDocScore = base.Score();
+                                               lastScoredDoc = doc;
+                                       }
+                                       Enclosing_Instance.coordinator.nrMatchers += requiredNrMatchers;
+                               }
+				// All scorers match, so defaultSimilarity base.Score() always has 1 as
+                               // the coordination factor.
+                               // Therefore the sum of the scores of the requiredScorers
+                               // is used as score.
+                               return lastDocScore;
+                       }
+               }
+               
+               private System.Collections.IList requiredScorers;
+               private System.Collections.IList optionalScorers;
+               private System.Collections.IList prohibitedScorers;
+               
+               private class Coordinator
+               {
+                       public Coordinator(BooleanScorer2 enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(BooleanScorer2 enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private BooleanScorer2 enclosingInstance;
+                       public BooleanScorer2 Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal float[] coordFactors = null;
+			internal int maxCoord = 0; // to be increased for each non-prohibited scorer
+                       internal int nrMatchers; // to be increased by score() of match counting scorers.
+                       
+                       internal virtual void  Init()
+                       {
+                               // use after all scorers have been added.
+                               coordFactors = new float[maxCoord + 1];
+                               Similarity sim = Enclosing_Instance.GetSimilarity();
+                               for (int i = 0; i <= maxCoord; i++)
+                               {
+                                       coordFactors[i] = sim.Coord(i, maxCoord);
+                               }
+                       }
+               }
+               
+               private Coordinator coordinator;
+               
+               /// <summary>The scorer to which all scoring will be delegated,
+               /// except for computing and using the coordination factor.
+               /// </summary>
+               private Scorer countingSumScorer;
+               
+               /// <summary>The number of optionalScorers that need to match (if there are any) </summary>
+               private int minNrShouldMatch;
+               
+               private int doc = - 1;
+               
+               /// <summary> Creates a {@link Scorer} with the given similarity and lists of required,
+		/// prohibited and optional scorers. If no required scorers are added, at least
+               /// one of the optional scorers will have to match during the search.
+               /// 
+               /// </summary>
+               /// <param name="similarity">The similarity to be used.
+               /// </param>
+               /// <param name="minNrShouldMatch">The minimum number of optional added scorers that should match
+               /// during the search. In case no required scorers are added, at least
+               /// one of the optional scorers will have to match during the search.
+               /// </param>
+               /// <param name="required">the list of required scorers.
+               /// </param>
+               /// <param name="prohibited">the list of prohibited scorers.
+               /// </param>
+               /// <param name="optional">the list of optional scorers.
+               /// </param>
+               public BooleanScorer2(Similarity similarity, int minNrShouldMatch, System.Collections.IList required, System.Collections.IList prohibited, System.Collections.IList optional):base(similarity)
+               {
+                       if (minNrShouldMatch < 0)
+                       {
+                               throw new System.ArgumentException("Minimum number of optional scorers should not be negative");
+                       }
+                       coordinator = new Coordinator(this);
+                       this.minNrShouldMatch = minNrShouldMatch;
+                       
+                       optionalScorers = optional;
+                       coordinator.maxCoord += optional.Count;
+                       
+                       requiredScorers = required;
+                       coordinator.maxCoord += required.Count;
+                       
+                       prohibitedScorers = prohibited;
+                       
+                       coordinator.Init();
+                       countingSumScorer = MakeCountingSumScorer();
+               }
+               
+               /// <summary>Count a scorer as a single match. </summary>
+               private class SingleMatchScorer:Scorer
+               {
+                       private void  InitBlock(BooleanScorer2 enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private BooleanScorer2 enclosingInstance;
+                       public BooleanScorer2 Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private Scorer scorer;
+                       private int lastScoredDoc = - 1;
+                       // Save the score of lastScoredDoc, so that we don't compute it more than
+                       // once in score().
+                       private float lastDocScore = System.Single.NaN;
+                       
+                       internal SingleMatchScorer(BooleanScorer2 enclosingInstance, Scorer scorer):base(scorer.GetSimilarity())
+                       {
+                               InitBlock(enclosingInstance);
+                               this.scorer = scorer;
+                       }
+                       public override float Score()
+                       {
+                               int doc = DocID();
+                               if (doc >= lastScoredDoc)
+                               {
+                                       if (doc > lastScoredDoc)
+                                       {
+                                               lastDocScore = scorer.Score();
+                                               lastScoredDoc = doc;
+                                       }
+                                       Enclosing_Instance.coordinator.nrMatchers++;
+                               }
+                               return lastDocScore;
+                       }
+                       /// <deprecated> use {@link #DocID()} instead. 
+                       /// </deprecated>
+            [Obsolete("use DocID() instead. ")]
+                       public override int Doc()
+                       {
+                               return scorer.Doc();
+                       }
+                       public override int DocID()
+                       {
+                               return scorer.DocID();
+                       }
+                       /// <deprecated> use {@link #NextDoc()} instead. 
+                       /// </deprecated>
+            [Obsolete("use NextDoc() instead. ")]
+                       public override bool Next()
+                       {
+                               return scorer.NextDoc() != NO_MORE_DOCS;
+                       }
+                       public override int NextDoc()
+                       {
+                               return scorer.NextDoc();
+                       }
+                       /// <deprecated> use {@link #Advance(int)} instead. 
+                       /// </deprecated>
+            [Obsolete("use Advance(int) instead. ")]
+                       public override bool SkipTo(int docNr)
+                       {
+                               return scorer.Advance(docNr) != NO_MORE_DOCS;
+                       }
+                       public override int Advance(int target)
+                       {
+                               return scorer.Advance(target);
+                       }
+                       public override Explanation Explain(int docNr)
+                       {
+                               return scorer.Explain(docNr);
+                       }
+               }
+               
+               private Scorer CountingDisjunctionSumScorer(System.Collections.IList scorers, int minNrShouldMatch)
+               {
+                       // each scorer from the list counted as a single matcher
+                       return new AnonymousClassDisjunctionSumScorer(this, scorers, minNrShouldMatch);
+               }
+               
+               private static readonly Similarity defaultSimilarity;
+               
+               private Scorer CountingConjunctionSumScorer(System.Collections.IList requiredScorers)
+               {
+                       // each scorer from the list counted as a single matcher
+                       int requiredNrMatchers = requiredScorers.Count;
+                       return new AnonymousClassConjunctionScorer(requiredNrMatchers, this, defaultSimilarity, requiredScorers);
+               }
+               
+               private Scorer DualConjunctionSumScorer(Scorer req1, Scorer req2)
+               {
+                       // non counting.
+                       return new ConjunctionScorer(defaultSimilarity, new Scorer[]{req1, req2});
+                       // All scorers match, so defaultSimilarity always has 1 as
+                       // the coordination factor.
+                       // Therefore the sum of the scores of two scorers
+                       // is used as score.
+               }
+               
+               /// <summary>Returns the scorer to be used for match counting and score summing.
+               /// Uses requiredScorers, optionalScorers and prohibitedScorers.
+               /// </summary>
+               private Scorer MakeCountingSumScorer()
+               {
+                       // each scorer counted as a single matcher
+                       return (requiredScorers.Count == 0)?MakeCountingSumScorerNoReq():MakeCountingSumScorerSomeReq();
+               }
+               
+               private Scorer MakeCountingSumScorerNoReq()
+               {
+                       // No required scorers
+                       // minNrShouldMatch optional scorers are required, but at least 1
+                       int nrOptRequired = (minNrShouldMatch < 1)?1:minNrShouldMatch;
+                       Scorer requiredCountingSumScorer;
+                       if (optionalScorers.Count > nrOptRequired)
+                               requiredCountingSumScorer = CountingDisjunctionSumScorer(optionalScorers, nrOptRequired);
+                       else if (optionalScorers.Count == 1)
+                               requiredCountingSumScorer = new SingleMatchScorer(this, (Scorer) optionalScorers[0]);
+                       else
+                               requiredCountingSumScorer = CountingConjunctionSumScorer(optionalScorers);
+                       return AddProhibitedScorers(requiredCountingSumScorer);
+               }
+               
+               private Scorer MakeCountingSumScorerSomeReq()
+               {
+                       // At least one required scorer.
+                       if (optionalScorers.Count == minNrShouldMatch)
+                       {
+                               // all optional scorers also required.
+                               System.Collections.ArrayList allReq = new System.Collections.ArrayList(requiredScorers);
+                               allReq.AddRange(optionalScorers);
+                               return AddProhibitedScorers(CountingConjunctionSumScorer(allReq));
+                       }
+                       else
+                       {
+                               // optionalScorers.size() > minNrShouldMatch, and at least one required scorer
+                               Scorer requiredCountingSumScorer = requiredScorers.Count == 1?new SingleMatchScorer(this, (Scorer) requiredScorers[0]):CountingConjunctionSumScorer(requiredScorers);
+                               if (minNrShouldMatch > 0)
+                               {
+                                       // use a required disjunction scorer over the optional scorers
+                                       return AddProhibitedScorers(DualConjunctionSumScorer(requiredCountingSumScorer, CountingDisjunctionSumScorer(optionalScorers, minNrShouldMatch)));
+                               }
+                               else
+                               {
+                                       // minNrShouldMatch == 0
+                                       return new ReqOptSumScorer(AddProhibitedScorers(requiredCountingSumScorer), optionalScorers.Count == 1?new SingleMatchScorer(this, (Scorer) optionalScorers[0]):CountingDisjunctionSumScorer(optionalScorers, 1));
+                               }
+                       }
+               }
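+		
+		// Worked example (a sketch of the composition above): two required,
+		// three optional and one prohibited scorer with minNrShouldMatch == 1
+		// yield
+		//   ReqExclScorer(
+		//       ConjunctionScorer(CountingConjunctionSumScorer(required),
+		//                         CountingDisjunctionSumScorer(optional, 1)),
+		//       prohibited[0])
+		// so the required conjunction gates matches, the counting disjunction
+		// enforces the minimum, and the prohibited scorer excludes documents.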
+               
+               /// <summary>Returns the scorer to be used for match counting and score summing.
+               /// Uses the given required scorer and the prohibitedScorers.
+               /// </summary>
+               /// <param name="requiredCountingSumScorer">A required scorer already built.
+               /// </param>
+               private Scorer AddProhibitedScorers(Scorer requiredCountingSumScorer)
+               {
+                       return (prohibitedScorers.Count == 0)?requiredCountingSumScorer:new ReqExclScorer(requiredCountingSumScorer, ((prohibitedScorers.Count == 1)?(Scorer) prohibitedScorers[0]:new DisjunctionSumScorer(prohibitedScorers)));
+               }
+               
+               /// <summary>Scores and collects all matching documents.</summary>
+               /// <param name="hc">The collector to which all matching documents are passed through
+               /// {@link HitCollector#Collect(int, float)}.
+               /// <br/>When this method is used the {@link #Explain(int)} method should not be used.
+               /// </param>
+               /// <deprecated> use {@link #Score(Collector)} instead.
+               /// </deprecated>
+        [Obsolete("use Score(Collector) instead.")]
+               public override void  Score(HitCollector hc)
+               {
+                       Score(new HitCollectorWrapper(hc));
+               }
+               
+               /// <summary>Scores and collects all matching documents.</summary>
+               /// <param name="collector">The collector to which all matching documents are passed through.
+               /// <br/>When this method is used the {@link #Explain(int)} method should not be used.
+               /// </param>
+               public override void  Score(Collector collector)
+               {
+                       collector.SetScorer(this);
+                       while ((doc = countingSumScorer.NextDoc()) != NO_MORE_DOCS)
+                       {
+                               collector.Collect(doc);
+                       }
+               }
+               
+               /// <summary>Expert: Collects matching documents in a range.
+               /// <br/>Note that {@link #Next()} must be called once before this method is
+               /// called for the first time.
+               /// </summary>
+               /// <param name="hc">The collector to which all matching documents are passed through
+               /// {@link HitCollector#Collect(int, float)}.
+               /// </param>
+               /// <param name="max">Do not score documents past this.
+               /// </param>
+               /// <returns> true if more matching documents may remain.
+               /// </returns>
+               /// <deprecated> use {@link #Score(Collector, int, int)} instead.
+               /// </deprecated>
+        [Obsolete("use Score(Collector, int, int) instead.")]
+               protected internal override bool Score(HitCollector hc, int max)
+               {
+                       return Score(new HitCollectorWrapper(hc), max, DocID());
+               }
+               
+               public /*protected internal*/ override bool Score(Collector collector, int max, int firstDocID)
+               {
+                       doc = firstDocID;
+                       collector.SetScorer(this);
+                       while (doc < max)
+                       {
+                               collector.Collect(doc);
+                               doc = countingSumScorer.NextDoc();
+                       }
+                       return doc != NO_MORE_DOCS;
+               }
+               
+               /// <deprecated> use {@link #DocID()} instead. 
+               /// </deprecated>
+        [Obsolete("use DocID() instead. ")]
+               public override int Doc()
+               {
+                       return countingSumScorer.Doc();
+               }
+               
+               public override int DocID()
+               {
+                       return doc;
+               }
+               
+               /// <deprecated> use {@link #NextDoc()} instead. 
+               /// </deprecated>
+        [Obsolete("use NextDoc() instead. ")]
+               public override bool Next()
+               {
+                       return NextDoc() != NO_MORE_DOCS;
+               }
+               
+               public override int NextDoc()
+               {
+                       return doc = countingSumScorer.NextDoc();
+               }
+               
+               public override float Score()
+               {
+                       coordinator.nrMatchers = 0;
+                       float sum = countingSumScorer.Score();
+                       return sum * coordinator.coordFactors[coordinator.nrMatchers];
+               }
+               
+               /// <deprecated> use {@link #Advance(int)} instead. 
+               /// </deprecated>
+        [Obsolete("use Advance(int) instead. ")]
+               public override bool SkipTo(int target)
+               {
+                       return Advance(target) != NO_MORE_DOCS;
+               }
+               
+               public override int Advance(int target)
+               {
+                       return doc = countingSumScorer.Advance(target);
+               }
+               
+               /// <summary>Throws a System.NotSupportedException.
+               /// TODO: Implement an explanation of the coordination factor.
+               /// </summary>
+               /// <param name="doc">The document number for the explanation.
+               /// </param>
+               /// <throws>  System.NotSupportedException </throws>
+               public override Explanation Explain(int doc)
+               {
+                       throw new System.NotSupportedException();
+                       /* How to explain the coordination factor?
+                       initCountingSumScorer();
+                       return countingSumScorer.explain(doc); // misses coord factor. 
+                       */
+               }
+               static BooleanScorer2()
+               {
+                       defaultSimilarity = Similarity.GetDefault();
+               }
+       }
+}
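
BooleanScorer2 is internal plumbing: applications never construct it; BooleanWeight does, when a BooleanQuery with required clauses is scored. A minimal sketch of the query shape that exercises all three scorer buckets above (editorial illustration, not part of the file; RAMDirectory, IndexWriter, SimpleAnalyzer and the TopDocs member naming are assumed from the usual 2.9-era API of this port):

    using System;
    using Mono.Lucene.Net.Analysis;
    using Mono.Lucene.Net.Documents;
    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Search;
    using Mono.Lucene.Net.Store;

    class BooleanScorer2Demo
    {
        static void Main()
        {
            Directory dir = new RAMDirectory();
            IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(),
                true, IndexWriter.MaxFieldLength.UNLIMITED);
            Document doc = new Document();
            doc.Add(new Field("body", "apache lucene search library",
                Field.Store.NO, Field.Index.ANALYZED));
            writer.AddDocument(doc);
            writer.Close();

            // One clause per bucket: MUST feeds the required scorers, SHOULD
            // the optional ones, MUST_NOT the prohibited ones handled by
            // AddProhibitedScorers above.
            BooleanQuery bq = new BooleanQuery();
            bq.Add(new TermQuery(new Term("body", "lucene")), BooleanClause.Occur.MUST);
            bq.Add(new TermQuery(new Term("body", "search")), BooleanClause.Occur.SHOULD);
            bq.Add(new TermQuery(new Term("body", "solr")), BooleanClause.Occur.MUST_NOT);

            IndexSearcher searcher = new IndexSearcher(dir, true);
            TopDocs hits = searcher.Search(bq, 10);                 // drives Score(Collector) above
            Console.WriteLine("matches: " + hits.ScoreDocs.Length); // ScoreDocs naming assumed from this port
            searcher.Close();
        }
    }
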
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/CachingSpanFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/CachingSpanFilter.cs
new file mode 100644 (file)
index 0000000..38d1ed6
--- /dev/null
@@ -0,0 +1,134 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using System.Runtime.InteropServices;
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Wraps another SpanFilter's result and caches it.  The purpose is to allow
+       /// filters to simply filter, and then wrap with this class to add caching.
+       /// </summary>
+       [Serializable]
+       public class CachingSpanFilter:SpanFilter
+       {
+               protected internal SpanFilter filter;
+               
+               /// <summary> A transient Filter cache.</summary>
+               [NonSerialized]
+        internal CachingWrapperFilter.FilterCache cache;
+
+        /// <summary>
+        /// New deletions always result in a cache miss, by default
+        /// ({@link CachingWrapperFilter.DeletesMode#RECACHE}).
+        /// </summary>
+        /// <param name="filter">Filter to cache results of
+        /// </param>
+        public CachingSpanFilter(SpanFilter filter): this(filter, CachingWrapperFilter.DeletesMode.RECACHE)
+               {
+                       
+               }
+
+        /// <param name="filter">Filter to cache results of
+        /// </param>
+        /// <param name="deletesMode">See {@link CachingWrapperFilter.DeletesMode}
+        /// </param>
+        public CachingSpanFilter(SpanFilter filter, CachingWrapperFilter.DeletesMode deletesMode)
+        {
+            this.filter = filter;
+            if (deletesMode == CachingWrapperFilter.DeletesMode.DYNAMIC)
+            {
+                throw new System.ArgumentException("DeletesMode.DYNAMIC is not supported");
+            }
+            this.cache = new AnonymousFilterCache(deletesMode);
+        }
+
+        class AnonymousFilterCache : CachingWrapperFilter.FilterCache
+        {
+            public AnonymousFilterCache(CachingWrapperFilter.DeletesMode deletesMode) : base(deletesMode)
+            {
+            }
+
+            protected override object MergeDeletes(IndexReader reader, object docIdSet)
+            {
+                throw new System.ArgumentException("DeletesMode.DYNAMIC is not supported");
+            }
+        }
+
+               /// <deprecated> Use {@link #GetDocIdSet(IndexReader)} instead.
+               /// </deprecated>
+        [Obsolete("Use GetDocIdSet(IndexReader) instead.")]
+               public override System.Collections.BitArray Bits(IndexReader reader)
+               {
+                       SpanFilterResult result = GetCachedResult(reader);
+                       return result != null?result.GetBits():null;
+               }
+               
+               public override DocIdSet GetDocIdSet(IndexReader reader)
+               {
+                       SpanFilterResult result = GetCachedResult(reader);
+                       return result != null?result.GetDocIdSet():null;
+               }
+
+        // for testing
+        public int hitCount, missCount;
+
+               private SpanFilterResult GetCachedResult(IndexReader reader)
+               {
+            object coreKey = reader.GetFieldCacheKey();
+            object delCoreKey = reader.HasDeletions() ? reader.GetDeletesCacheKey() : coreKey;
+
+            SpanFilterResult result = (SpanFilterResult) cache.Get(reader, coreKey, delCoreKey);
+            if (result != null) {
+                hitCount++;
+                return result;
+            }
+
+            missCount++;
+            result = filter.BitSpans(reader);
+
+            cache.Put(coreKey, delCoreKey, result);
+            return result;
+               }
+               
+               
+               public override SpanFilterResult BitSpans(IndexReader reader)
+               {
+                       return GetCachedResult(reader);
+               }
+               
+               public override System.String ToString()
+               {
+                       return "CachingSpanFilter(" + filter + ")";
+               }
+               
+               public  override bool Equals(System.Object o)
+               {
+                       if (!(o is CachingSpanFilter))
+                               return false;
+                       return this.filter.Equals(((CachingSpanFilter) o).filter);
+               }
+               
+               public override int GetHashCode()
+               {
+                       return filter.GetHashCode() ^ 0x1117BF25;
+               }
+       }
+}
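
A minimal sketch of the intended wrapping pattern, assuming SpanQueryFilter and SpanTermQuery from the same 2.9-era port and an already-open IndexReader:

    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Search;
    using Mono.Lucene.Net.Search.Spans;

    class CachingSpanFilterDemo
    {
        public static SpanFilterResult FilterTwice(IndexReader reader)
        {
            // The inner filter does the real work; the wrapper only caches.
            SpanFilter inner = new SpanQueryFilter(
                new SpanTermQuery(new Term("body", "lucene")));
            CachingSpanFilter cached = new CachingSpanFilter(inner); // RECACHE by default

            SpanFilterResult first = cached.BitSpans(reader);  // missCount++: computed and stored
            SpanFilterResult again = cached.BitSpans(reader);  // hitCount++: served from the cache
            return again;
        }
    }
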
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/CachingWrapperFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/CachingWrapperFilter.cs
new file mode 100644 (file)
index 0000000..72ecd77
--- /dev/null
@@ -0,0 +1,320 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using DocIdBitSet = Mono.Lucene.Net.Util.DocIdBitSet;
+using OpenBitSetDISI = Mono.Lucene.Net.Util.OpenBitSetDISI;
+using Mono.Lucene.Net.Util;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Wraps another filter's result and caches it.  The purpose is to allow
+       /// filters to simply filter, and then wrap with this class to add caching.
+       /// </summary>
+       [Serializable]
+       public class CachingWrapperFilter:Filter
+       {
+               protected internal Filter filter;
+               
+        /// <summary> Expert: Specifies how new deletions against a reopened
+        /// reader should be handled.
+        /// 
+        /// <p/>The default is IGNORE, which means the cache entry
+        /// will be re-used for a given segment, even when that
+        /// segment has been reopened due to changes in deletions.
+        /// This is a big performance gain, especially with
+        /// near-real-time readers, since you don't hit a cache
+        /// miss on every reopened reader for prior segments.<p/>
+        /// 
+        /// <p/>However, in some cases this can cause invalid query
+        /// results, allowing deleted documents to be returned.
+        /// This only happens if the main query does not rule out
+        /// deleted documents on its own, such as a top-level
+        /// ConstantScoreQuery.  To fix this, use RECACHE to
+        /// re-create the cached filter (at a higher per-reopen
+        /// cost, but at faster subsequent search performance), or
+        /// use DYNAMIC to dynamically intersect deleted docs (fast
+        /// reopen time but some hit to search performance).<p/>
+        /// </summary>
+        [Serializable]
+        public class DeletesMode : Parameter
+        {
+            private DeletesMode(String name) : base(name)
+            {
+            }
+            public static DeletesMode IGNORE = new DeletesMode("IGNORE");
+            public static DeletesMode RECACHE = new DeletesMode("RECACHE");
+            public static DeletesMode DYNAMIC = new DeletesMode("DYNAMIC");
+        }
+
+               internal FilterCache cache;
+
+        [Serializable]
+        abstract internal class FilterCache 
+        {
+            /// <summary> A transient Filter cache (package private because of test).</summary>
+            // NOTE: not final so that we can dynamically re-init
+            // after de-serialize
+            volatile IDictionary cache;
+
+            private DeletesMode deletesMode;
+
+            public FilterCache(DeletesMode deletesMode)
+            {
+                this.deletesMode = deletesMode;
+            }
+
+            public Object Get(IndexReader reader, object coreKey, object delCoreKey)
+            {
+                lock (this)
+                {
+                    object value;
+
+                    if (cache == null)
+                    {
+                        cache = new SupportClass.WeakHashTable();
+                    }
+
+                    if (deletesMode == DeletesMode.IGNORE)
+                    {
+                        // key on core
+                        value = cache[coreKey];
+                    }
+                    else if (deletesMode == DeletesMode.RECACHE)
+                    {
+                        // key on deletes, if any, else core
+                        value = cache[delCoreKey];
+                    }
+                    else
+                    {
+
+                        System.Diagnostics.Debug.Assert(deletesMode == DeletesMode.DYNAMIC);
+
+                        // first try for exact match
+                        value = cache[delCoreKey];
+
+                        if (value == null)
+                        {
+                            // now for core match, but dynamically AND NOT
+                            // deletions
+                            value = cache[coreKey];
+                            if (value != null && reader.HasDeletions())
+                            {
+                                value = MergeDeletes(reader, value);
+                            }
+                        }
+                    }
+                    return value;
+                }
+
+            }
+       
+            protected abstract object MergeDeletes(IndexReader reader, object value);
+
+            public void Put(object coreKey, object delCoreKey, object value)
+            {
+                if (deletesMode == DeletesMode.IGNORE)
+                {
+                    cache[coreKey]= value;
+                }
+                else if (deletesMode == DeletesMode.RECACHE)
+                {
+                    cache[delCoreKey]=value;
+                }
+                else
+                {
+                    cache[coreKey]= value;
+                    cache[delCoreKey]= value;
+                }
+            }
+        }
+
+        /// <summary> New deletes are ignored by default, which gives higher
+        /// cache hit rate on reopened readers.  Most of the time
+        /// this is safe, because the filter will be AND'd with a
+        /// Query that fully enforces deletions.  If instead you
+        /// need this filter to always enforce deletions, pass
+        /// either {@link DeletesMode#RECACHE} or {@link DeletesMode#DYNAMIC}.
+        /// </summary>
+        /// <param name="filter">Filter to cache results of
+        /// </param>
+        public CachingWrapperFilter(Filter filter) : this(filter, DeletesMode.IGNORE)
+               {
+               }
+
+        /// <summary> Expert: by default, the cached filter will be shared
+        /// across reopened segments that only had changes to their
+        /// deletions.
+        /// </summary>
+        /// <param name="filter">Filter to cache results of
+        /// </param>
+        /// <param name="deletesMode">See {@link DeletesMode}
+        /// </param>
+        public CachingWrapperFilter(Filter filter, DeletesMode deletesMode)
+        {
+            this.filter = filter;
+            cache = new AnonymousFilterCache(deletesMode);
+            
+            //cache = new FilterCache(deletesMode) 
+            // {
+            //  public Object mergeDeletes(final IndexReader r, final Object docIdSet) {
+            //    return new FilteredDocIdSet((DocIdSet) docIdSet) {
+            //      protected boolean match(int docID) {
+            //        return !r.isDeleted(docID);
+            //      }
+            //    };
+            //  }
+            //};
+        }
+
+        class AnonymousFilterCache : FilterCache
+        {
+            class AnonymousFilteredDocIdSet : FilteredDocIdSet
+            {
+                IndexReader r;
+                public AnonymousFilteredDocIdSet(DocIdSet innerSet, IndexReader r) : base(innerSet)
+                {
+                    this.r = r;
+                }
+                public override bool Match(int docid)
+                {
+                    return !r.IsDeleted(docid);
+                }
+            }
+
+            public AnonymousFilterCache(DeletesMode deletesMode) : base(deletesMode)
+            {
+            }
+
+            protected  override object MergeDeletes(IndexReader reader, object docIdSet)
+            {
+                return new AnonymousFilteredDocIdSet((DocIdSet)docIdSet, reader);
+            }
+        }
+
+               /// <deprecated> Use {@link #GetDocIdSet(IndexReader)} instead.
+               /// </deprecated>
+        [Obsolete("Use GetDocIdSet(IndexReader) instead.")]
+               public override System.Collections.BitArray Bits(IndexReader reader)
+               {
+                       object coreKey = reader.GetFieldCacheKey();
+            object delCoreKey = reader.HasDeletions() ? reader.GetDeletesCacheKey() : coreKey;
+
+            object cached = cache.Get(reader, coreKey, delCoreKey);
+                       
+                       if (cached != null)
+                       {
+                               if (cached is System.Collections.BitArray)
+                               {
+                                       return (System.Collections.BitArray) cached;
+                               }
+                               else if (cached is DocIdBitSet)
+                                       return ((DocIdBitSet) cached).GetBitSet();
+                               // It would be nice to handle the DocIdSet case, but that's not really possible
+                       }
+                       
+                       System.Collections.BitArray bits = filter.Bits(reader);
+
+            if (bits != null)
+            {
+                cache.Put(coreKey, delCoreKey, bits);
+            }
+                       
+                       return bits;
+               }
+               
+               /// <summary>Provide the DocIdSet to be cached, using the DocIdSet provided
+               /// by the wrapped Filter.
+               /// This implementation returns the given DocIdSet.
+               /// </summary>
+               protected internal virtual DocIdSet DocIdSetToCache(DocIdSet docIdSet, IndexReader reader)
+               {
+            if (docIdSet == null)
+            {
+                // this is better than returning null, as the nonnull result can be cached
+                return DocIdSet.EMPTY_DOCIDSET;
+            }
+            else if (docIdSet.IsCacheable()) {
+                               return docIdSet;
+                       }
+                       else
+                       {
+                               DocIdSetIterator it = docIdSet.Iterator();
+                               // null is allowed to be returned by iterator(),
+                               // in this case we wrap with the empty set,
+                               // which is cacheable.
+                               return (it == null) ? DocIdSet.EMPTY_DOCIDSET : new OpenBitSetDISI(it, reader.MaxDoc());
+                       }
+               }
+
+        // for testing
+        public int hitCount, missCount;
+               
+               public override DocIdSet GetDocIdSet(IndexReader reader)
+               {
+                       object coreKey = reader.GetFieldCacheKey();
+            object delCoreKey = reader.HasDeletions() ? reader.GetDeletesCacheKey() : coreKey;
+
+            object cached = cache.Get(reader, coreKey, delCoreKey);
+                       
+                       if (cached != null)
+                       {
+                hitCount++;
+                               if (cached is DocIdSet)
+                                       return (DocIdSet) cached;
+                               else
+                                       return new DocIdBitSet((System.Collections.BitArray) cached);
+                       }
+            missCount++;
+            // cache miss
+                       DocIdSet docIdSet = DocIdSetToCache(filter.GetDocIdSet(reader), reader);
+                       
+                       if (docIdSet != null)
+                       {
+                cache.Put(coreKey, delCoreKey, docIdSet);
+                       }
+                       
+                       return docIdSet;
+               }
+               
+               public override System.String ToString()
+               {
+                       return "CachingWrapperFilter(" + filter + ")";
+               }
+               
+               public  override bool Equals(System.Object o)
+               {
+                       if (!(o is CachingWrapperFilter))
+                               return false;
+                       return this.filter.Equals(((CachingWrapperFilter) o).filter);
+               }
+               
+               public override int GetHashCode()
+               {
+                       return filter.GetHashCode() ^ 0x1117BF25;
+               }
+       }
+}
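
A minimal sketch of picking a DeletesMode when wrapping an expensive filter, assuming QueryWrapperFilter from the same 2.9-era port:

    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Search;

    class CachingWrapperFilterDemo
    {
        public static DocIdSet Filter(IndexReader reader)
        {
            Filter slow = new QueryWrapperFilter(new TermQuery(new Term("user", "alice")));

            // IGNORE (the default) maximizes cache hits across reopens; RECACHE
            // pays a per-reopen cost but always enforces new deletions, as the
            // DeletesMode comment above explains.
            CachingWrapperFilter cached =
                new CachingWrapperFilter(slow, CachingWrapperFilter.DeletesMode.RECACHE);

            DocIdSet first = cached.GetDocIdSet(reader);  // missCount++
            DocIdSet again = cached.GetDocIdSet(reader);  // hitCount++
            return again;
        }
    }
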
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Collector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Collector.cs
new file mode 100644 (file)
index 0000000..027d315
--- /dev/null
@@ -0,0 +1,187 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> <p/>Expert: Collectors are primarily meant to be used to
+       /// gather raw results from a search, and implement sorting
+       /// or custom result filtering, collation, etc. <p/>
+       /// 
+       /// <p/>As of 2.9, this class replaces the deprecated
+       /// HitCollector, and offers an API for efficient collection
+       /// of hits across sequential {@link IndexReader}s. {@link
+       /// IndexSearcher} advances the collector through each of the
+       /// sub readers, in an arbitrary order. This results in a
+       /// higher-performance means of collection.<p/>
+       /// 
+       /// <p/>Lucene's core collectors are derived from Collector.
+       /// Likely your application can use one of these classes, or
+       /// subclass {@link TopDocsCollector}, instead of
+       /// implementing Collector directly:
+       /// 
+       /// <ul>
+       /// 
+       /// <li>{@link TopDocsCollector} is an abstract base class
+       /// that assumes you will retrieve the top N docs,
+       /// according to some criteria, after collection is
+       /// done.  </li>
+       /// 
+       /// <li>{@link TopScoreDocCollector} is a concrete subclass
+       /// {@link TopDocsCollector} and sorts according to score +
+       /// docID.  This is used internally by the {@link
+       /// IndexSearcher} search methods that do not take an
+       /// explicit {@link Sort}. It is likely the most frequently
+       /// used collector.</li>
+       /// 
+       /// <li>{@link TopFieldCollector} subclasses {@link
+       /// TopDocsCollector} and sorts according to a specified
+       /// {@link Sort} object (sort by field).  This is used
+       /// internally by the {@link IndexSearcher} search methods
+       /// that take an explicit {@link Sort}.</li>
+       /// 
+       /// <li>{@link TimeLimitingCollector}, which wraps any other
+       /// Collector and aborts the search if it's taken too much
+       /// time, will subclass Collector in 3.0 (presently it
+       /// subclasses the deprecated HitCollector).</li>
+       /// 
+       /// <li>{@link PositiveScoresOnlyCollector} wraps any other
+       /// Collector and prevents collection of hits whose score
+       /// is &lt;= 0.0</li>
+       /// 
+       /// </ul>
+       /// 
+       /// <p/>Collector decouples the score from the collected doc:
+       /// the score computation is skipped entirely if it's not
+       /// needed.  Collectors that do need the score should
+       /// implement the {@link #setScorer} method, to hold onto the
+       /// passed {@link Scorer} instance, and call {@link
+       /// Scorer#Score()} within the collect method to compute the
+       /// current hit's score.  If your collector may request the
+       /// score for a single hit multiple times, you should use
+       /// {@link ScoreCachingWrappingScorer}. <p/>
+       /// 
+       /// <p/><b>NOTE:</b> The doc that is passed to the collect
+       /// method is relative to the current reader. If your
+       /// collector needs to resolve this to the docID space of the
+       /// Multi*Reader, you must re-base it by recording the
+       /// docBase from the most recent setNextReader call.  Here's
+       /// a simple example showing how to collect docIDs into a
+       /// BitSet:<p/>
+       /// 
+       /// <pre>
+       /// Searcher searcher = new IndexSearcher(indexReader);
+       /// final BitSet bits = new BitSet(indexReader.maxDoc());
+       /// searcher.search(query, new Collector() {
+       /// private int docBase;
+       /// 
+       /// <em>// ignore scorer</em>
+       /// public void setScorer(Scorer scorer) {
+       /// }
+       /// 
+       /// <em>// accept docs out of order (for a BitSet it doesn't matter)</em>
+       /// public boolean acceptsDocsOutOfOrder() {
+       /// return true;
+       /// }
+       /// 
+       /// public void collect(int doc) {
+       /// bits.set(doc + docBase);
+       /// }
+       /// 
+       /// public void setNextReader(IndexReader reader, int docBase) {
+       /// this.docBase = docBase;
+       /// }
+       /// });
+       /// </pre>
+       /// 
+       /// <p/>Not all collectors will need to rebase the docID.  For
+       /// example, a collector that simply counts the total number
+       /// of hits would skip it.<p/>
+       /// 
+       /// <p/><b>NOTE:</b> Prior to 2.9, Lucene silently filtered
+       /// out hits with score &lt;= 0.  As of 2.9, the core Collectors
+       /// no longer do that.  It's very unusual to have such hits
+       /// (a negative query boost, or function query returning
+       /// negative custom scores, could cause it to happen).  If
+       /// you need that behavior, use {@link
+       /// PositiveScoresOnlyCollector}.<p/>
+       /// 
+       /// <p/><b>NOTE:</b> This API is experimental and might change
+       /// in incompatible ways in the next release.<p/>
+       /// 
+       /// </summary>
+       /// <since> 2.9
+       /// </since>
+       public abstract class Collector
+       {
+               
+               /// <summary> Called before successive calls to {@link #Collect(int)}. Implementations
+               /// that need the score of the current document (passed-in to
+               /// {@link #Collect(int)}), should save the passed-in Scorer and call
+               /// scorer.score() when needed.
+               /// </summary>
+               public abstract void  SetScorer(Scorer scorer);
+               
+               /// <summary> Called once for every document matching a query, with the unbased document
+               /// number.
+               /// 
+               /// <p/>
+               /// Note: This is called in an inner search loop. For good search performance,
+               /// implementations of this method should not call {@link Searcher#Doc(int)} or
+               /// {@link Mono.Lucene.Net.Index.IndexReader#Document(int)} on every hit.
+               /// Doing so can slow searches by an order of magnitude or more.
+               /// </summary>
+               public abstract void  Collect(int doc);
+               
+               /// <summary> Called before collecting from each IndexReader. All doc ids in
+               /// {@link #Collect(int)} will correspond to reader.
+               /// 
+               /// Add docBase to the current IndexReader's internal document id to re-base ids
+               /// in {@link #Collect(int)}.
+               /// 
+               /// </summary>
+               /// <param name="reader">next IndexReader
+               /// </param>
+               /// <param name="docBase">docBase of the reader; add it to collected ids to re-base them
+               /// </param>
+               public abstract void  SetNextReader(IndexReader reader, int docBase);
+               
+               /// <summary> Return <code>true</code> if this collector does not
+               /// require the matching docIDs to be delivered in int sort
+               /// order (smallest to largest) to {@link #collect}.
+               /// 
+               /// <p/> Most Lucene Query implementations will visit
+               /// matching docIDs in order.  However, some queries
+               /// (currently limited to certain cases of {@link
+               /// BooleanQuery}) can achieve faster searching if the
+               /// <code>Collector</code> allows them to deliver the
+               /// docIDs out of order.
+               /// 
+               /// <p/> Many collectors don't mind getting docIDs out of
+               /// order, so it's important to return <code>true</code>
+               /// here.
+               /// </summary>
+               public abstract bool AcceptsDocsOutOfOrder();
+       }
+}
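
The BitSet example in the class comment above is Java; a C# equivalent against this port's API might look like the following sketch, with System.Collections.BitArray standing in for java.util.BitSet:

    using System.Collections;
    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Search;

    class BitArrayCollector : Collector
    {
        private readonly BitArray bits;
        private int docBase;

        public BitArrayCollector(BitArray bits) { this.bits = bits; }

        // ignore scorer: this collector never looks at scores
        public override void SetScorer(Scorer scorer) { }

        // accept docs out of order (for a bit set it doesn't matter)
        public override bool AcceptsDocsOutOfOrder() { return true; }

        // re-base the per-segment doc id into the top-level docID space
        public override void Collect(int doc) { bits.Set(doc + docBase, true); }

        public override void SetNextReader(IndexReader reader, int docBase)
        {
            this.docBase = docBase;
        }
    }

    // usage, given an open IndexReader and a Query:
    //   Searcher searcher = new IndexSearcher(indexReader);
    //   BitArray bits = new BitArray(indexReader.MaxDoc());
    //   searcher.Search(query, new BitArrayCollector(bits));
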
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ComplexExplanation.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ComplexExplanation.cs
new file mode 100644 (file)
index 0000000..3d70310
--- /dev/null
@@ -0,0 +1,80 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Expert: Describes the score computation for document and query, and
+       /// can distinguish a match independent of a positive value. 
+       /// </summary>
+       [Serializable]
+       public class ComplexExplanation:Explanation
+       {
+               private System.Boolean? match;
+        private bool isMatchSet = false;
+               
+               public ComplexExplanation():base()
+               {
+               }
+               
+               public ComplexExplanation(bool match, float value_Renamed, System.String description):base(value_Renamed, description)
+               {
+                       this.match = match;
+            this.isMatchSet = true;
+               }
+               
+               /// <summary> The match status of this explanation node.</summary>
+               /// <returns> May be null if match status is unknown
+               /// </returns>
+               public virtual System.Boolean? GetMatch()
+               {
+                       return match;
+               }
+               /// <summary> Sets the match status assigned to this explanation node.</summary>
+               /// <param name="match">May be null if match status is unknown
+               /// </param>
+               public virtual void  SetMatch(System.Boolean? match)
+               {
+                       this.match = match;
+            this.isMatchSet = true;
+               }
+               /// <summary> Indicates whether or not this Explanation models a good match.
+               /// 
+               /// <p/>
+               /// If the match status is explicitly set (i.e.: not null) this method
+               /// uses it; otherwise it defers to the superclass.
+               /// <p/>
+               /// </summary>
+               /// <seealso cref="GetMatch">
+               /// </seealso>
+               public override bool IsMatch()
+               {
+                       System.Boolean? m = GetMatch();
+            return m ?? base.IsMatch();
+               }
+               
+               protected internal override System.String GetSummary()
+               {
+            if (isMatchSet == false)
+                               return base.GetSummary();
+                       
+                       return GetValue() + " = " + (IsMatch()?"(MATCH) ":"(NON-MATCH) ") + GetDescription();
+               }
+       }
+}
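
A minimal sketch of the tri-state match flag: an explicitly set flag wins over the value-based rule inherited from Explanation:

    using System;
    using Mono.Lucene.Net.Search;

    class ComplexExplanationDemo
    {
        static void Main()
        {
            // Explicit match flag: IsMatch() uses it instead of "value > 0".
            ComplexExplanation forced =
                new ComplexExplanation(true, 0.0f, "forced match despite zero score");
            Console.WriteLine(forced.IsMatch());   // True, even though the value is 0

            // No flag set: IsMatch() defers to the Explanation base class.
            ComplexExplanation unset = new ComplexExplanation();
            unset.SetValue(1.5f);
            unset.SetDescription("weight product");
            Console.WriteLine(unset.IsMatch());    // True, from the value > 0 rule
        }
    }
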
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ConjunctionScorer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ConjunctionScorer.cs
new file mode 100644 (file)
index 0000000..680181b
--- /dev/null
@@ -0,0 +1,200 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Scorer for conjunctions, sets of queries, all of which are required. </summary>
+       class ConjunctionScorer:Scorer
+       {
+               private class AnonymousClassComparator : System.Collections.IComparer
+               {
+                       public AnonymousClassComparator(ConjunctionScorer enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(ConjunctionScorer enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private ConjunctionScorer enclosingInstance;
+                       public ConjunctionScorer Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       // sort the array
+                       public virtual int Compare(System.Object o1, System.Object o2)
+                       {
+                               return ((Scorer) o1).DocID() - ((Scorer) o2).DocID();
+                       }
+               }
+               
+               private Scorer[] scorers;
+               private float coord;
+               private int lastDoc = - 1;
+               
+               public ConjunctionScorer(Similarity similarity, System.Collections.ICollection scorers):this(similarity, (Scorer[]) new System.Collections.ArrayList(scorers).ToArray(typeof(Scorer)))
+               {
+               }
+               
+               public ConjunctionScorer(Similarity similarity, Scorer[] scorers):base(similarity)
+               {
+                       this.scorers = scorers;
+                       coord = similarity.Coord(scorers.Length, scorers.Length);
+                       
+                       for (int i = 0; i < scorers.Length; i++)
+                       {
+                               if (scorers[i].NextDoc() == NO_MORE_DOCS)
+                               {
+                                       // If even one of the sub-scorers does not have any documents, this
+                                       // scorer should not attempt to do any more work.
+                                       lastDoc = NO_MORE_DOCS;
+                                       return ;
+                               }
+                       }
+                       
+                       // Sort the array the first time...
+                       // We don't need to sort the array in any future calls because we know
+                       // it will already start off sorted (all scorers on same doc).
+                       
+                       // note that this comparator is not consistent with equals!
+                       System.Array.Sort(scorers, new AnonymousClassComparator(this));
+                       
+                       // NOTE: doNext() must be called before the re-sorting of the array later on.
+                       // The reason is this: assume there are 5 scorers, whose first docs are 1,
+                       // 2, 3, 5, 5 respectively. Sorting (above) leaves the array as is. Calling
+                       // doNext() here advances all the first scorers to 5 (or a larger doc ID
+                       // they all agree on). 
+                       // However, if we re-sort before doNext() is called, the order will be 5, 3,
+                       // 2, 1, 5 and then doNext() will stop immediately, since the first scorer's
+                       // docs equals the last one. So the invariant that after calling doNext() 
+                       // all scorers are on the same doc ID is broken.
+                       if (DoNext() == NO_MORE_DOCS)
+                       {
+                               // The scorers did not agree on any document.
+                               lastDoc = NO_MORE_DOCS;
+                               return ;
+                       }
+                       
+                       // If first-time skip distance is any predictor of
+                       // scorer sparseness, then we should always try to skip first on
+                       // those scorers.
+                       // Keep the last scorer in its last place (it will be the first
+                       // to be skipped on), but reverse all of the others so that
+                       // they will be skipped on in order of original high skip.
+                       int end = scorers.Length - 1;
+                       int max = end >> 1;
+                       for (int i = 0; i < max; i++)
+                       {
+                               Scorer tmp = scorers[i];
+                               int idx = end - i - 1;
+                               scorers[i] = scorers[idx];
+                               scorers[idx] = tmp;
+                       }
+               }
+               
+               private int DoNext()
+               {
+                       int first = 0;
+                       int doc = scorers[scorers.Length - 1].DocID();
+                       Scorer firstScorer;
+                       while ((firstScorer = scorers[first]).DocID() < doc)
+                       {
+                               doc = firstScorer.Advance(doc);
+                               first = first == scorers.Length - 1?0:first + 1;
+                       }
+                       return doc;
+               }
+               
+               public override int Advance(int target)
+               {
+                       if (lastDoc == NO_MORE_DOCS)
+                       {
+                               return lastDoc;
+                       }
+                       else if (scorers[(scorers.Length - 1)].DocID() < target)
+                       {
+                               scorers[(scorers.Length - 1)].Advance(target);
+                       }
+                       return lastDoc = DoNext();
+               }
+               
+               /// <deprecated> use {@link #DocID()} instead. 
+               /// </deprecated>
+        [Obsolete("use DocID() instead.")]
+               public override int Doc()
+               {
+                       return lastDoc;
+               }
+               
+               public override int DocID()
+               {
+                       return lastDoc;
+               }
+               
+               public override Explanation Explain(int doc)
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+               /// <deprecated> use {@link #NextDoc()} instead. 
+               /// </deprecated>
+        [Obsolete("use NextDoc() instead.")]
+               public override bool Next()
+               {
+                       return NextDoc() != NO_MORE_DOCS;
+               }
+               
+               public override int NextDoc()
+               {
+                       if (lastDoc == NO_MORE_DOCS)
+                       {
+                               return lastDoc;
+                       }
+                       else if (lastDoc == - 1)
+                       {
+                               return lastDoc = scorers[scorers.Length - 1].DocID();
+                       }
+                       scorers[(scorers.Length - 1)].NextDoc();
+                       return lastDoc = DoNext();
+               }
+               
+               public override float Score()
+               {
+                       float sum = 0.0f;
+                       for (int i = 0; i < scorers.Length; i++)
+                       {
+                               sum += scorers[i].Score();
+                       }
+                       return sum * coord;
+               }
+               
+               /// <deprecated> use {@link #Advance(int)} instead. 
+               /// </deprecated>
+        [Obsolete("use Advance(int) instead.")]
+               public override bool SkipTo(int target)
+               {
+                       return Advance(target) != NO_MORE_DOCS;
+               }
+       }
+}
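
ConjunctionScorer is internal: it is what a BooleanQuery whose clauses are all required boils down to. A sketch of such a query (field and terms are illustrative only):

    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Search;

    class ConjunctionDemo
    {
        public static Query AllRequired()
        {
            // Every clause required: the resulting scorer advances the
            // sub-scorers round-robin (DoNext above) until all agree on a
            // doc, then returns the summed score times the coord factor.
            BooleanQuery bq = new BooleanQuery();
            bq.Add(new TermQuery(new Term("body", "apache")), BooleanClause.Occur.MUST);
            bq.Add(new TermQuery(new Term("body", "lucene")), BooleanClause.Occur.MUST);
            return bq;
        }
    }
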
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ConstantScoreQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ConstantScoreQuery.cs
new file mode 100644 (file)
index 0000000..16df531
--- /dev/null
@@ -0,0 +1,269 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A query that wraps a filter and simply returns a constant score equal to the
+       /// query boost for every document in the filter.
+       /// 
+       /// 
+       /// </summary>
+       /// <version>  $Id: ConstantScoreQuery.java 807180 2009-08-24 12:26:43Z markrmiller $
+       /// </version>
+       [Serializable]
+       public class ConstantScoreQuery:Query
+       {
+               protected internal Filter filter;
+               
+               public ConstantScoreQuery(Filter filter)
+               {
+                       this.filter = filter;
+               }
+               
+               /// <summary>Returns the encapsulated filter </summary>
+               public virtual Filter GetFilter()
+               {
+                       return filter;
+               }
+               
+               public override Query Rewrite(IndexReader reader)
+               {
+                       return this;
+               }
+               
+               public override void  ExtractTerms(System.Collections.Hashtable terms)
+               {
+                       // OK to not add any terms when used for MultiSearcher,
+                       // but may not be OK for highlighting
+               }
+               
+               [Serializable]
+               protected internal class ConstantWeight:Weight
+               {
+                       private void  InitBlock(ConstantScoreQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private ConstantScoreQuery enclosingInstance;
+                       public ConstantScoreQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private Similarity similarity;
+                       private float queryNorm;
+                       private float queryWeight;
+                       
+                       public ConstantWeight(ConstantScoreQuery enclosingInstance, Searcher searcher)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.similarity = Enclosing_Instance.GetSimilarity(searcher);
+                       }
+                       
+                       public override Query GetQuery()
+                       {
+                               return Enclosing_Instance;
+                       }
+                       
+                       public override float GetValue()
+                       {
+                               return queryWeight;
+                       }
+                       
+                       public override float SumOfSquaredWeights()
+                       {
+                               queryWeight = Enclosing_Instance.GetBoost();
+                               return queryWeight * queryWeight;
+                       }
+                       
+                       public override void  Normalize(float norm)
+                       {
+                               this.queryNorm = norm;
+                               queryWeight *= this.queryNorm;
+                       }
+                       
+                       public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
+                       {
+                               return new ConstantScorer(enclosingInstance, similarity, reader, this);
+                       }
+                       
+                       public override Explanation Explain(IndexReader reader, int doc)
+                       {
+                               
+                               ConstantScorer cs = new ConstantScorer(enclosingInstance, similarity, reader, this);
+                               bool exists = cs.docIdSetIterator.Advance(doc) == doc;
+                               
+                               ComplexExplanation result = new ComplexExplanation();
+                               
+                               if (exists)
+                               {
+                                       result.SetDescription("ConstantScoreQuery(" + Enclosing_Instance.filter + "), product of:");
+                                       result.SetValue(queryWeight);
+                                       result.SetMatch(true);
+                                       result.AddDetail(new Explanation(Enclosing_Instance.GetBoost(), "boost"));
+                                       result.AddDetail(new Explanation(queryNorm, "queryNorm"));
+                               }
+                               else
+                               {
+                                       result.SetDescription("ConstantScoreQuery(" + Enclosing_Instance.filter + ") doesn't match id " + doc);
+                                       result.SetValue(0);
+                                       result.SetMatch(false);
+                               }
+                               return result;
+                       }
+               }
+               
+               protected internal class ConstantScorer:Scorer
+               {
+                       private void  InitBlock(ConstantScoreQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private ConstantScoreQuery enclosingInstance;
+                       public ConstantScoreQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal DocIdSetIterator docIdSetIterator;
+                       internal float theScore;
+                       internal int doc = - 1;
+                       
+                       public ConstantScorer(ConstantScoreQuery enclosingInstance, Similarity similarity, IndexReader reader, Weight w):base(similarity)
+                       {
+                               InitBlock(enclosingInstance);
+                               theScore = w.GetValue();
+                               DocIdSet docIdSet = Enclosing_Instance.filter.GetDocIdSet(reader);
+                               if (docIdSet == null)
+                               {
+                                       docIdSetIterator = DocIdSet.EMPTY_DOCIDSET.Iterator();
+                               }
+                               else
+                               {
+                                       DocIdSetIterator iter = docIdSet.Iterator();
+                                       if (iter == null)
+                                       {
+                                               docIdSetIterator = DocIdSet.EMPTY_DOCIDSET.Iterator();
+                                       }
+                                       else
+                                       {
+                                               docIdSetIterator = iter;
+                                       }
+                               }
+                       }
+                       
+                       /// <deprecated> use {@link #NextDoc()} instead. 
+                       /// </deprecated>
+            [Obsolete("use NextDoc() instead.")]
+                       public override bool Next()
+                       {
+                               return docIdSetIterator.NextDoc() != NO_MORE_DOCS;
+                       }
+                       
+                       public override int NextDoc()
+                       {
+                               return docIdSetIterator.NextDoc();
+                       }
+                       
+                       /// <deprecated> use {@link #DocID()} instead. 
+                       /// </deprecated>
+            [Obsolete("use DocID() instead. ")]
+                       public override int Doc()
+                       {
+                               return docIdSetIterator.Doc();
+                       }
+                       
+                       public override int DocID()
+                       {
+                               return docIdSetIterator.DocID();
+                       }
+                       
+                       public override float Score()
+                       {
+                               return theScore;
+                       }
+                       
+                       /// <deprecated> use {@link #Advance(int)} instead. 
+                       /// </deprecated>
+            [Obsolete("use Advance(int) instead. ")]
+                       public override bool SkipTo(int target)
+                       {
+                               return docIdSetIterator.Advance(target) != NO_MORE_DOCS;
+                       }
+                       
+                       public override int Advance(int target)
+                       {
+                               return docIdSetIterator.Advance(target);
+                       }
+                       
+                       public override Explanation Explain(int doc)
+                       {
+                               throw new System.NotSupportedException();
+                       }
+               }
+               
+               public override Weight CreateWeight(Searcher searcher)
+               {
+                       return new ConstantScoreQuery.ConstantWeight(this, searcher);
+               }
+               
+               /// <summary>Prints a user-readable version of this query. </summary>
+               public override System.String ToString(System.String field)
+               {
+                       return "ConstantScore(" + filter.ToString() + (GetBoost() == 1.0 ? ")" : "^" + GetBoost() + ")");
+               }
+               
+               /// <summary>Returns true if <code>o</code> is equal to this. </summary>
+               public  override bool Equals(System.Object o)
+               {
+                       if (this == o)
+                               return true;
+                       if (!(o is ConstantScoreQuery))
+                               return false;
+                       ConstantScoreQuery other = (ConstantScoreQuery) o;
+                       return this.GetBoost() == other.GetBoost() && filter.Equals(other.filter);
+               }
+               
+               /// <summary>Returns a hash code value for this object. </summary>
+               public override int GetHashCode()
+               {
+                       // Simple add is OK since no existing filter hashcode has a float component.
+                       return filter.GetHashCode() + BitConverter.ToInt32(BitConverter.GetBytes(GetBoost()), 0);
+        }
+
+               override public System.Object Clone()
+               {
+            // {{Aroush-1.9}} is this all that we need to clone?!
+            ConstantScoreQuery clone = (ConstantScoreQuery)base.Clone();
+            clone.filter = (Filter)this.filter;
+            return clone;
+        }
+       }
+}
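
A minimal usage sketch: every document accepted by the filter receives the same score, namely the query boost. QueryWrapperFilter and the field/term are assumed for illustration:

    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Search;

    class ConstantScoreDemo
    {
        public static Query Build()
        {
            // QueryWrapperFilter turns any query into a filter.
            Filter filter = new QueryWrapperFilter(new TermQuery(new Term("state", "active")));
            ConstantScoreQuery q = new ConstantScoreQuery(filter);
            q.SetBoost(2.0f);   // every matching doc scores exactly 2.0
            return q;
        }
    }
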
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ConstantScoreRangeQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ConstantScoreRangeQuery.cs
new file mode 100644 (file)
index 0000000..1411788
--- /dev/null
@@ -0,0 +1,73 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A range query that returns a constant score equal to its boost for
+       /// all documents in the given range of terms (bounds inclusive or exclusive as specified).
+       /// 
+       /// <p/>It does not have an upper bound on the number of clauses covered in the range.
+       /// 
+       /// <p/>This query matches the documents looking for terms that fall into the
+       /// supplied range according to {@link String#compareTo(String)}. It is not intended
+       /// for numerical ranges, use {@link NumericRangeQuery} instead.
+       /// 
+       /// <p/>This query is hardwired to {@link MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}.
+       /// If you want to change this, use {@link TermRangeQuery} instead.
+       /// 
+       /// </summary>
+       /// <deprecated> Use {@link TermRangeQuery} for term ranges or
+       /// {@link NumericRangeQuery} for numeric ranges instead.
+       /// This class will be removed in Lucene 3.0.
+       /// </deprecated>
+       /// <version>  $Id: ConstantScoreRangeQuery.java 797694 2009-07-25 00:03:33Z mikemccand $
+       /// </version>
+    [Obsolete("Use TermRangeQuery for term ranges or NumericRangeQuery for numeric ranges instead. This class will be removed in Lucene 3.0.")]
+       [Serializable]
+       public class ConstantScoreRangeQuery:TermRangeQuery
+       {
+               
+               public ConstantScoreRangeQuery(System.String fieldName, System.String lowerVal, System.String upperVal, bool includeLower, bool includeUpper):base(fieldName, lowerVal, upperVal, includeLower, includeUpper)
+               {
+                       rewriteMethod = CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
+               }
+               
+               public ConstantScoreRangeQuery(System.String fieldName, System.String lowerVal, System.String upperVal, bool includeLower, bool includeUpper, System.Globalization.CompareInfo collator):base(fieldName, lowerVal, upperVal, includeLower, includeUpper, collator)
+               {
+                       rewriteMethod = CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
+               }
+               
+               public virtual System.String GetLowerVal()
+               {
+                       return GetLowerTerm();
+               }
+               
+               public virtual System.String GetUpperVal()
+               {
+                       return GetUpperTerm();
+               }
+               
+               /// <summary>Changes of mode are not supported by this class (fixed to constant score rewrite mode) </summary>
+               public override void  SetRewriteMethod(RewriteMethod method)
+               {
+                       throw new System.NotSupportedException("Use TermRangeQuery instead to change the rewrite method.");
+               }
+       }
+}
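Since the class is marked [Obsolete], here is a hedged sketch of the migration its message suggests. The field name and bounds are hypothetical, and it assumes the TermRangeQuery base class and MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT field vendored elsewhere in this import:

    using Mono.Lucene.Net.Search;

    class RangeMigrationExample
    {
        static Query BuildPriceRange()
        {
            // Deprecated spelling:
            // Query old = new ConstantScoreRangeQuery("price", "010", "020", true, true);

            // Replacement: a TermRangeQuery pinned to the same constant-score rewrite
            // mode that ConstantScoreRangeQuery hardwires in its constructors above.
            TermRangeQuery q = new TermRangeQuery("price", "010", "020", true, true);
            q.SetRewriteMethod(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT);
            return q;
        }
    }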
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DefaultSimilarity.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DefaultSimilarity.cs
new file mode 100644 (file)
index 0000000..2128b9b
--- /dev/null
@@ -0,0 +1,112 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using FieldInvertState = Mono.Lucene.Net.Index.FieldInvertState;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Expert: Default scoring implementation. </summary>
+       [Serializable]
+       public class DefaultSimilarity:Similarity
+       {
+               
+               /// <summary>Implemented as
+               /// <code>state.getBoost()*lengthNorm(numTerms)</code>, where
+               /// <code>numTerms</code> is {@link FieldInvertState#GetLength()} if {@link
+               /// #setDiscountOverlaps} is false, else it's {@link
+               /// FieldInvertState#GetLength()} - {@link
+               /// FieldInvertState#GetNumOverlap()}.
+               /// 
+               /// <p/><b>WARNING</b>: This API is new and experimental, and may suddenly
+               /// change.<p/> 
+               /// </summary>
+               public override float ComputeNorm(System.String field, FieldInvertState state)
+               {
+                       int numTerms;
+                       if (discountOverlaps)
+                               numTerms = state.GetLength() - state.GetNumOverlap();
+                       else
+                               numTerms = state.GetLength();
+                       return (float) (state.GetBoost() * LengthNorm(field, numTerms));
+               }
+               
+               /// <summary>Implemented as <code>1/sqrt(numTerms)</code>. </summary>
+               public override float LengthNorm(System.String fieldName, int numTerms)
+               {
+                       return (float) (1.0 / System.Math.Sqrt(numTerms));
+               }
+               
+               /// <summary>Implemented as <code>1/sqrt(sumOfSquaredWeights)</code>. </summary>
+               public override float QueryNorm(float sumOfSquaredWeights)
+               {
+                       return (float) (1.0 / System.Math.Sqrt(sumOfSquaredWeights));
+               }
+               
+               /// <summary>Implemented as <code>sqrt(freq)</code>. </summary>
+               public override float Tf(float freq)
+               {
+                       return (float) System.Math.Sqrt(freq);
+               }
+               
+               /// <summary>Implemented as <code>1 / (distance + 1)</code>. </summary>
+               public override float SloppyFreq(int distance)
+               {
+                       return 1.0f / (distance + 1);
+               }
+               
+               /// <summary>Implemented as <code>log(numDocs/(docFreq+1)) + 1</code>. </summary>
+               public override float Idf(int docFreq, int numDocs)
+               {
+                       return (float) (System.Math.Log(numDocs / (double) (docFreq + 1)) + 1.0);
+               }
+               
+               /// <summary>Implemented as <code>overlap / maxOverlap</code>. </summary>
+               public override float Coord(int overlap, int maxOverlap)
+               {
+                       return overlap / (float) maxOverlap;
+               }
+               
+               // Default false
+               protected internal bool discountOverlaps;
+               
+               /// <summary>Determines whether overlap tokens (Tokens with
+               /// 0 position increment) are ignored when computing
+               /// norm.  By default this is false, meaning overlap
+               /// tokens are counted just like non-overlap tokens.
+               /// 
+               /// <p/><b>WARNING</b>: This API is new and experimental, and may suddenly
+               /// change.<p/>
+               /// 
+               /// </summary>
+               /// <seealso cref="ComputeNorm">
+               /// </seealso>
+               public virtual void  SetDiscountOverlaps(bool v)
+               {
+                       discountOverlaps = v;
+               }
+               
+               /// <seealso cref="SetDiscountOverlaps">
+               /// </seealso>
+               public virtual bool GetDiscountOverlaps()
+               {
+                       return discountOverlaps;
+               }
+       }
+}
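A worked check of the formulas above, as plain arithmetic with no index required; the counts are illustrative only:

    using System;

    class SimilarityByHand
    {
        static void Main()
        {
            // A term occurring 4 times in a 16-term field, in a 1000-doc index
            // where 9 documents contain the term:
            double tf = Math.Sqrt(4);                          // 2.0
            double idf = Math.Log(1000 / (double)(9 + 1)) + 1; // ln(100) + 1 ~= 5.605
            double lengthNorm = 1.0 / Math.Sqrt(16);           // 0.25
            Console.WriteLine("tf={0} idf={1} lengthNorm={2}", tf, idf, lengthNorm);
        }
    }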
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DisjunctionMaxQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DisjunctionMaxQuery.cs
new file mode 100644 (file)
index 0000000..eafe9b1
--- /dev/null
@@ -0,0 +1,335 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A query that generates the union of documents produced by its subqueries, and that scores each document with the maximum
+       /// score for that document as produced by any subquery, plus a tie breaking increment for any additional matching subqueries.
+       /// This is useful when searching for a word in multiple fields with different boost factors (so that the fields cannot be
+       /// combined equivalently into a single search field).  We want the primary score to be the one associated with the highest boost,
+       /// not the sum of the field scores (as BooleanQuery would give).
+       /// If the query is "albino elephant" this ensures that "albino" matching one field and "elephant" matching
+       /// another gets a higher score than "albino" matching both fields.
+       /// To get this result, use both BooleanQuery and DisjunctionMaxQuery:  for each term a DisjunctionMaxQuery searches for it in
+       /// each field, while the set of these DisjunctionMaxQuery's is combined into a BooleanQuery.
+       /// The tie breaker capability allows results that include the same term in multiple fields to be judged better than results that
+       /// include this term in only the best of those multiple fields, without confusing this with the better case of two different terms
+       /// in the multiple fields.
+       /// </summary>
+       [Serializable]
+       public class DisjunctionMaxQuery:Query, System.ICloneable
+       {
+               
+               /* The subqueries */
+               private SupportClass.EquatableList<Query> disjuncts = new SupportClass.EquatableList<Query>();
+               
+               /* Multiple of the non-max disjunct scores added into our final score.  Non-zero values support tie-breaking. */
+               private float tieBreakerMultiplier = 0.0f;
+               
+               /// <summary>Creates a new empty DisjunctionMaxQuery.  Use add() to add the subqueries.</summary>
+               /// <param name="tieBreakerMultiplier">the score of each non-maximum disjunct for a document is multiplied by this weight
+               /// and added into the final score.  If non-zero, the value should be small, on the order of 0.1, which says that
+               /// 10 occurrences of a word in a lower-scored field that is also in a higher-scored field is just as good as a unique
+               /// word in the lower-scored field (i.e., one that is not in any higher-scored field).
+               /// </param>
+               public DisjunctionMaxQuery(float tieBreakerMultiplier)
+               {
+                       this.tieBreakerMultiplier = tieBreakerMultiplier;
+               }
+               
+               /// <summary> Creates a new DisjunctionMaxQuery</summary>
+        /// <param name="disjuncts">a Collection&lt;Query&gt; of all the disjuncts to add
+               /// </param>
+               /// <param name="tieBreakerMultiplier">  the weight to give to each matching non-maximum disjunct
+               /// </param>
+               public DisjunctionMaxQuery(System.Collections.ICollection disjuncts, float tieBreakerMultiplier)
+               {
+                       this.tieBreakerMultiplier = tieBreakerMultiplier;
+                       Add(disjuncts);
+               }
+               
+               /// <summary>Add a subquery to this disjunction</summary>
+               /// <param name="query">the disjunct added
+               /// </param>
+               public virtual void  Add(Query query)
+               {
+                       disjuncts.Add(query);
+               }
+               
+               /// <summary>Add a collection of disjuncts to this disjunction
+               /// via Iterable
+               /// </summary>
+               public virtual void  Add(System.Collections.ICollection disjuncts)
+               {
+                       this.disjuncts.AddRange(disjuncts);
+               }
+
+        /// <summary>An Iterator&lt;Query&gt; over the disjuncts </summary>
+               public virtual System.Collections.IEnumerator Iterator()
+               {
+                       return disjuncts.GetEnumerator();
+               }
+               
+               /// <summary> Expert: the Weight for DisjunctionMaxQuery, used to
+               /// normalize, score and explain these queries.
+               /// 
+               /// <p/>NOTE: this API and implementation is subject to
+               /// change suddenly in the next release.<p/>
+               /// </summary>
+               [Serializable]
+               protected internal class DisjunctionMaxWeight:Weight
+               {
+                       private void  InitBlock(DisjunctionMaxQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private DisjunctionMaxQuery enclosingInstance;
+                       public DisjunctionMaxQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       /// <summary>The Similarity implementation. </summary>
+                       protected internal Similarity similarity;
+                       
+                       /// <summary>The Weights for our subqueries, in 1-1 correspondence with disjuncts </summary>
+                       protected internal System.Collections.ArrayList weights = new System.Collections.ArrayList();
+                       
+                       /* Construct the Weight for this Query searched by searcher.  Recursively construct subquery weights. */
+                       public DisjunctionMaxWeight(DisjunctionMaxQuery enclosingInstance, Searcher searcher)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.similarity = searcher.GetSimilarity();
+                               for (System.Collections.IEnumerator iter = Enclosing_Instance.disjuncts.GetEnumerator(); iter.MoveNext(); )
+                               {
+                                       weights.Add(((Query) iter.Current).CreateWeight(searcher));
+                               }
+                       }
+                       
+                       /* Return our associated DisjunctionMaxQuery */
+                       public override Query GetQuery()
+                       {
+                               return Enclosing_Instance;
+                       }
+                       
+                       /* Return our boost */
+                       public override float GetValue()
+                       {
+                               return Enclosing_Instance.GetBoost();
+                       }
+                       
+                       /* Compute the sum of squared weights of us applied to our subqueries.  Used for normalization. */
+                       public override float SumOfSquaredWeights()
+                       {
+                               float max = 0.0f, sum = 0.0f;
+                               for (System.Collections.IEnumerator iter = weights.GetEnumerator(); iter.MoveNext(); )
+                               {
+                                       float sub = ((Weight) iter.Current).SumOfSquaredWeights();
+                                       sum += sub;
+                                       max = System.Math.Max(max, sub);
+                               }
+                               float boost = Enclosing_Instance.GetBoost();
+                               return (((sum - max) * Enclosing_Instance.tieBreakerMultiplier * Enclosing_Instance.tieBreakerMultiplier) + max) * boost * boost;
+                       }
+                       
+                       /* Apply the computed normalization factor to our subqueries */
+                       public override void  Normalize(float norm)
+                       {
+                               norm *= Enclosing_Instance.GetBoost(); // Incorporate our boost
+                               for (System.Collections.IEnumerator iter = weights.GetEnumerator(); iter.MoveNext(); )
+                               {
+                                       ((Weight) iter.Current).Normalize(norm);
+                               }
+                       }
+                       
+                       /* Create the scorer used to score our associated DisjunctionMaxQuery */
+                       public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
+                       {
+                               Scorer[] scorers = new Scorer[weights.Count];
+                               int idx = 0;
+                               for (System.Collections.IEnumerator iter = weights.GetEnumerator(); iter.MoveNext(); )
+                               {
+                                       Weight w = (Weight) iter.Current;
+                                       Scorer subScorer = w.Scorer(reader, true, false);
+                                       if (subScorer != null && subScorer.NextDoc() != DocIdSetIterator.NO_MORE_DOCS)
+                                       {
+                                               scorers[idx++] = subScorer;
+                                       }
+                               }
+                               if (idx == 0)
+                               return null; // none of the scorers had any documents
+                               DisjunctionMaxScorer result = new DisjunctionMaxScorer(Enclosing_Instance.tieBreakerMultiplier, similarity, scorers, idx);
+                               return result;
+                       }
+                       
+                       /* Explain the score we computed for doc */
+                       public override Explanation Explain(IndexReader reader, int doc)
+                       {
+                               if (Enclosing_Instance.disjuncts.Count == 1)
+                                       return ((Weight) weights[0]).Explain(reader, doc);
+                               ComplexExplanation result = new ComplexExplanation();
+                               float max = 0.0f, sum = 0.0f;
+                               result.SetDescription(Enclosing_Instance.tieBreakerMultiplier == 0.0f?"max of:":"max plus " + Enclosing_Instance.tieBreakerMultiplier + " times others of:");
+                               for (System.Collections.IEnumerator iter = weights.GetEnumerator(); iter.MoveNext(); )
+                               {
+                                       Explanation e = ((Weight) iter.Current).Explain(reader, doc);
+                                       if (e.IsMatch())
+                                       {
+                                               // set the match flag directly (the ported tempAux indirection was redundant)
+                                               result.SetMatch(true);
+                                               result.AddDetail(e);
+                                               sum += e.GetValue();
+                                               max = System.Math.Max(max, e.GetValue());
+                                       }
+                               }
+                               result.SetValue(max + (sum - max) * Enclosing_Instance.tieBreakerMultiplier);
+                               return result;
+                       }
+               } // end of DisjunctionMaxWeight inner class
+               
+               /* Create the Weight used to score us */
+               public override Weight CreateWeight(Searcher searcher)
+               {
+                       return new DisjunctionMaxWeight(this, searcher);
+               }
+               
+               /// <summary>Optimize our representation and our subqueries representations</summary>
+               /// <param name="reader">the IndexReader we query
+               /// </param>
+               /// <returns> an optimized copy of us (which may not be a copy if there is nothing to optimize) 
+               /// </returns>
+               public override Query Rewrite(IndexReader reader)
+               {
+                       int numDisjunctions = disjuncts.Count;
+                       if (numDisjunctions == 1)
+                       {
+                               Query singleton = (Query) disjuncts[0];
+                               Query result = singleton.Rewrite(reader);
+                               if (GetBoost() != 1.0f)
+                               {
+                                       if (result == singleton)
+                                               result = (Query) result.Clone();
+                                       result.SetBoost(GetBoost() * result.GetBoost());
+                               }
+                               return result;
+                       }
+                       DisjunctionMaxQuery clone = null;
+                       for (int i = 0; i < numDisjunctions; i++)
+                       {
+                               Query clause = (Query) disjuncts[i];
+                               Query rewrite = clause.Rewrite(reader);
+                               if (rewrite != clause)
+                               {
+                                       if (clone == null)
+                                               clone = (DisjunctionMaxQuery) this.Clone();
+                                       clone.disjuncts[i] = rewrite;
+                               }
+                       }
+                       if (clone != null)
+                               return clone;
+                       else
+                               return this;
+               }
+               
+               /// <summary>Create a shallow copy of us -- used in rewriting if necessary</summary>
+               /// <returns> a copy of us (but reuse, don't copy, our subqueries) 
+               /// </returns>
+               public override System.Object Clone()
+               {
+                       DisjunctionMaxQuery clone = (DisjunctionMaxQuery) base.Clone();
+            clone.disjuncts = (SupportClass.EquatableList<Query>) this.disjuncts.Clone();
+                       return clone;
+               }
+               
+               // inherit javadoc
+               public override void  ExtractTerms(System.Collections.Hashtable terms)
+               {
+                       for (System.Collections.IEnumerator iter = disjuncts.GetEnumerator(); iter.MoveNext(); )
+                       {
+                               ((Query) iter.Current).ExtractTerms(terms);
+                       }
+               }
+               
+               /// <summary>Prettyprint us.</summary>
+               /// <param name="field">the field to which we are applied
+               /// </param>
+               /// <returns> a string that shows what we do, of the form "(disjunct1 | disjunct2 | ... | disjunctn)^boost"
+               /// </returns>
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       buffer.Append("(");
+                       int numDisjunctions = disjuncts.Count;
+                       for (int i = 0; i < numDisjunctions; i++)
+                       {
+                               Query subquery = (Query) disjuncts[i];
+                               if (subquery is BooleanQuery)
+                               {
+                                       // wrap sub-bools in parens
+                                       buffer.Append("(");
+                                       buffer.Append(subquery.ToString(field));
+                                       buffer.Append(")");
+                               }
+                               else
+                                       buffer.Append(subquery.ToString(field));
+                               if (i != numDisjunctions - 1)
+                                       buffer.Append(" | ");
+                       }
+                       buffer.Append(")");
+                       if (tieBreakerMultiplier != 0.0f)
+                       {
+                               buffer.Append("~");
+                               buffer.Append(tieBreakerMultiplier);
+                       }
+                       if (GetBoost() != 1.0)
+                       {
+                               buffer.Append("^");
+                               buffer.Append(GetBoost());
+                       }
+                       return buffer.ToString();
+               }
+               
+               /// <summary>Return true iff we represent the same query as o</summary>
+               /// <param name="o">another object
+               /// </param>
+               /// <returns> true iff o is a DisjunctionMaxQuery with the same boost and the same subqueries, in the same order, as us
+               /// </returns>
+               public  override bool Equals(System.Object o)
+               {
+                       if (!(o is DisjunctionMaxQuery))
+                               return false;
+                       DisjunctionMaxQuery other = (DisjunctionMaxQuery) o;
+                       return this.GetBoost() == other.GetBoost() && this.tieBreakerMultiplier == other.tieBreakerMultiplier && this.disjuncts.Equals(other.disjuncts);
+               }
+               
+               /// <summary>Compute a hash code for hashing us</summary>
+               /// <returns> the hash code
+               /// </returns>
+               public override int GetHashCode()
+               {
+                       return BitConverter.ToInt32(BitConverter.GetBytes(GetBoost()), 0) + BitConverter.ToInt32(BitConverter.GetBytes(tieBreakerMultiplier), 0) + disjuncts.GetHashCode();
+               }
+       }
+}
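A sketch of the BooleanQuery-of-DisjunctionMaxQuery pattern the summary describes. The field names are hypothetical, and it assumes the TermQuery, Term, BooleanQuery, and BooleanClause types vendored elsewhere in this import:

    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Search;

    class AlbinoElephantExample
    {
        static Query Build()
        {
            // One DisjunctionMaxQuery per term: per document, take the best field
            // score for that term, plus 0.1 times the others as a tie breaker...
            DisjunctionMaxQuery albino = new DisjunctionMaxQuery(0.1f);
            albino.Add(new TermQuery(new Term("title", "albino")));
            albino.Add(new TermQuery(new Term("body", "albino")));

            DisjunctionMaxQuery elephant = new DisjunctionMaxQuery(0.1f);
            elephant.Add(new TermQuery(new Term("title", "elephant")));
            elephant.Add(new TermQuery(new Term("body", "elephant")));

            // ...then combine the per-term maxima, so matching both terms (in any
            // fields) outscores matching one term in both fields.
            BooleanQuery combined = new BooleanQuery();
            combined.Add(albino, BooleanClause.Occur.SHOULD);
            combined.Add(elephant, BooleanClause.Occur.SHOULD);
            return combined;
        }
    }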
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DisjunctionMaxScorer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DisjunctionMaxScorer.cs
new file mode 100644 (file)
index 0000000..eda2ef1
--- /dev/null
@@ -0,0 +1,263 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> The Scorer for DisjunctionMaxQuery's.  The union of all documents generated by the subquery scorers
+       /// is generated in document number order.  The score for each document is the maximum of the scores computed
+       /// by the subquery scorers that generate that document, plus tieBreakerMultiplier times the sum of the scores
+       /// for the other subqueries that generate the document.
+       /// </summary>
+       class DisjunctionMaxScorer:Scorer
+       {
+               
+               /* The scorers for subqueries that have remaining docs, kept as a min heap by number of next doc. */
+               private Scorer[] subScorers;
+               private int numScorers;
+               /* Multiplier applied to non-maximum-scoring subqueries for a document as they are summed into the result. */
+               private float tieBreakerMultiplier;
+               private int doc = - 1;
+               
+               /// <summary> Creates a new instance of DisjunctionMaxScorer
+               /// 
+               /// </summary>
+               /// <param name="tieBreakerMultiplier">Multiplier applied to non-maximum-scoring subqueries for a
+               /// document as they are summed into the result.
+               /// </param>
+               /// <param name="similarity">not used, since our definition involves neither coord nor terms
+               /// directly
+               /// </param>
+               /// <param name="subScorers">The sub scorers this Scorer should iterate on
+               /// </param>
+               /// <param name="numScorers">The actual number of scorers to iterate on. Note that the array's
+               /// length may be larger than the actual number of scorers.
+               /// </param>
+               public DisjunctionMaxScorer(float tieBreakerMultiplier, Similarity similarity, Scorer[] subScorers, int numScorers):base(similarity)
+               {
+                       
+                       this.tieBreakerMultiplier = tieBreakerMultiplier;
+                       // The passed subScorers array includes only scorers which have documents
+                       // (DisjunctionMaxQuery takes care of that), and their nextDoc() was already
+                       // called.
+                       this.subScorers = subScorers;
+                       this.numScorers = numScorers;
+                       
+                       Heapify();
+               }
+               
+               /// <summary> Generate the next document matching our associated DisjunctionMaxQuery.
+               /// 
+               /// </summary>
+               /// <returns> true iff there is a next document
+               /// </returns>
+               /// <deprecated> use {@link #NextDoc()} instead.
+               /// </deprecated>
+        [Obsolete("use NextDoc() instead.")]
+               public override bool Next()
+               {
+                       return NextDoc() != NO_MORE_DOCS;
+               }
+               
+               public override int NextDoc()
+               {
+                       if (numScorers == 0)
+                               return doc = NO_MORE_DOCS;
+                       while (subScorers[0].DocID() == doc)
+                       {
+                               if (subScorers[0].NextDoc() != NO_MORE_DOCS)
+                               {
+                                       HeapAdjust(0);
+                               }
+                               else
+                               {
+                                       HeapRemoveRoot();
+                                       if (numScorers == 0)
+                                       {
+                                               return doc = NO_MORE_DOCS;
+                                       }
+                               }
+                       }
+                       
+                       return doc = subScorers[0].DocID();
+               }
+               
+               /// <deprecated> use {@link #DocID()} instead. 
+               /// </deprecated>
+        [Obsolete("use DocID() instead.")]
+               public override int Doc()
+               {
+                       return subScorers[0].Doc();
+               }
+               
+               public override int DocID()
+               {
+                       return doc;
+               }
+               
+               /// <summary>Determine the current document score.  Initially invalid, until {@link #Next()} is called the first time.</summary>
+               /// <returns> the score of the current generated document
+               /// </returns>
+               public override float Score()
+               {
+                       int doc = subScorers[0].DocID();
+                       float[] sum = new float[]{subScorers[0].Score()}, max = new float[]{sum[0]};
+                       int size = numScorers;
+                       ScoreAll(1, size, doc, sum, max);
+                       ScoreAll(2, size, doc, sum, max);
+                       return max[0] + (sum[0] - max[0]) * tieBreakerMultiplier;
+               }
+               
+               // Recursively iterate all subScorers that generated last doc computing sum and max
+               private void  ScoreAll(int root, int size, int doc, float[] sum, float[] max)
+               {
+                       if (root < size && subScorers[root].DocID() == doc)
+                       {
+                               float sub = subScorers[root].Score();
+                               sum[0] += sub;
+                               max[0] = System.Math.Max(max[0], sub);
+                               ScoreAll((root << 1) + 1, size, doc, sum, max);
+                               ScoreAll((root << 1) + 2, size, doc, sum, max);
+                       }
+               }
+               
+               /// <summary> Advance to the first document beyond the current whose number is greater
+               /// than or equal to target.
+               /// 
+               /// </summary>
+               /// <param name="target">the minimum number of the next desired document
+               /// </param>
+               /// <returns> true iff there is a document to be generated whose number is at
+               /// least target
+               /// </returns>
+               /// <deprecated> use {@link #Advance(int)} instead.
+               /// </deprecated>
+        [Obsolete("use Advance(int) instead.")]
+               public override bool SkipTo(int target)
+               {
+                       return Advance(target) != NO_MORE_DOCS;
+               }
+               
+               public override int Advance(int target)
+               {
+                       if (numScorers == 0)
+                               return doc = NO_MORE_DOCS;
+                       while (subScorers[0].DocID() < target)
+                       {
+                               if (subScorers[0].Advance(target) != NO_MORE_DOCS)
+                               {
+                                       HeapAdjust(0);
+                               }
+                               else
+                               {
+                                       HeapRemoveRoot();
+                                       if (numScorers == 0)
+                                       {
+                                               return doc = NO_MORE_DOCS;
+                                       }
+                               }
+                       }
+                       return doc = subScorers[0].DocID();
+               }
+               
+               /// <summary>Explain a score that we computed.  UNSUPPORTED -- see explanation capability in DisjunctionMaxQuery.</summary>
+               /// <param name="doc">the number of a document we scored
+               /// </param>
+               /// <returns> the Explanation for our score
+               /// </returns>
+               public override Explanation Explain(int doc)
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+               // Organize subScorers into a min heap with scorers generating the earliest document on top.
+               private void  Heapify()
+               {
+                       for (int i = (numScorers >> 1) - 1; i >= 0; i--)
+                       {
+                               HeapAdjust(i);
+                       }
+               }
+               
+               /* The subtree of subScorers at root is a min heap except possibly for its root element.
+               * Bubble the root down as required to make the subtree a heap.
+               */
+               private void  HeapAdjust(int root)
+               {
+                       Scorer scorer = subScorers[root];
+                       int doc = scorer.DocID();
+                       int i = root;
+                       while (i <= (numScorers >> 1) - 1)
+                       {
+                               int lchild = (i << 1) + 1;
+                               Scorer lscorer = subScorers[lchild];
+                               int ldoc = lscorer.DocID();
+                               int rdoc = System.Int32.MaxValue, rchild = (i << 1) + 2;
+                               Scorer rscorer = null;
+                               if (rchild < numScorers)
+                               {
+                                       rscorer = subScorers[rchild];
+                                       rdoc = rscorer.DocID();
+                               }
+                               if (ldoc < doc)
+                               {
+                                       if (rdoc < ldoc)
+                                       {
+                                               subScorers[i] = rscorer;
+                                               subScorers[rchild] = scorer;
+                                               i = rchild;
+                                       }
+                                       else
+                                       {
+                                               subScorers[i] = lscorer;
+                                               subScorers[lchild] = scorer;
+                                               i = lchild;
+                                       }
+                               }
+                               else if (rdoc < doc)
+                               {
+                                       subScorers[i] = rscorer;
+                                       subScorers[rchild] = scorer;
+                                       i = rchild;
+                               }
+                               else
+                               {
+                                       return ;
+                               }
+                       }
+               }
+               
+               // Remove the root Scorer from subScorers and re-establish it as a heap
+               private void  HeapRemoveRoot()
+               {
+                       if (numScorers == 1)
+                       {
+                               subScorers[0] = null;
+                               numScorers = 0;
+                       }
+                       else
+                       {
+                               subScorers[0] = subScorers[numScorers - 1];
+                               subScorers[numScorers - 1] = null;
+                               --numScorers;
+                               HeapAdjust(0);
+                       }
+               }
+       }
+}
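The combination rule in Score() above, checked by hand on sample sub-scores (the numbers are illustrative only):

    using System;

    class DisjunctionMaxByHand
    {
        static void Main()
        {
            // Three subscorers match the same document with these scores:
            float[] subScores = { 0.9f, 0.5f, 0.3f };
            float tieBreakerMultiplier = 0.1f;

            float max = 0.0f, sum = 0.0f;
            foreach (float s in subScores) { sum += s; max = Math.Max(max, s); }

            // max + (sum - max) * tie  =  0.9 + 0.8 * 0.1  =  0.98
            Console.WriteLine(max + (sum - max) * tieBreakerMultiplier);
        }
    }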
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DisjunctionSumScorer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DisjunctionSumScorer.cs
new file mode 100644 (file)
index 0000000..44d6832
--- /dev/null
@@ -0,0 +1,382 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using ScorerDocQueue = Mono.Lucene.Net.Util.ScorerDocQueue;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>A Scorer for OR like queries, counterpart of <code>ConjunctionScorer</code>.
+       /// This Scorer implements {@link Scorer#SkipTo(int)} and uses skipTo() on the given Scorers. 
+       /// TODO: Implement score(HitCollector, int).
+       /// </summary>
+       class DisjunctionSumScorer:Scorer
+       {
+               /// <summary>The number of subscorers. </summary>
+               private int nrScorers;
+               
+               /// <summary>The subscorers. </summary>
+               protected internal System.Collections.IList subScorers;
+               
+               /// <summary>The minimum number of scorers that should match. </summary>
+               private int minimumNrMatchers;
+               
+               /// <summary>The scorerDocQueue contains all subscorers ordered by their current doc(),
+               /// with the minimum at the top.
+               /// <br/>The scorerDocQueue is initialized the first time next() or skipTo() is called.
+               /// <br/>An exhausted scorer is immediately removed from the scorerDocQueue.
+               /// <br/>If less than the minimumNrMatchers scorers
+               /// remain in the scorerDocQueue next() and skipTo() return false.
+               /// <p/>
+               /// After each call to next() or skipTo()
+               /// <code>currentSumScore</code> is the total score of the current matching doc,
+               /// <code>nrMatchers</code> is the number of matching scorers,
+               /// and all scorers are after the matching doc, or are exhausted.
+               /// </summary>
+               private ScorerDocQueue scorerDocQueue;
+               
+               /// <summary>The document number of the current match. </summary>
+               private int currentDoc = - 1;
+               
+               /// <summary>The number of subscorers that provide the current match. </summary>
+               protected internal int nrMatchers = - 1;
+               
+               private float currentScore = System.Single.NaN;
+               
+               /// <summary>Construct a <code>DisjunctionScorer</code>.</summary>
+               /// <param name="subScorers">A collection of at least two subscorers.
+               /// </param>
+               /// <param name="minimumNrMatchers">The positive minimum number of subscorers that should
+               /// match to match this query.
+               /// <br/>When <code>minimumNrMatchers</code> is bigger than
+               /// the number of <code>subScorers</code>,
+               /// no matches will be produced.
+               /// <br/>When minimumNrMatchers equals the number of subScorers,
+               /// it is more efficient to use <code>ConjunctionScorer</code>.
+               /// </param>
+               public DisjunctionSumScorer(System.Collections.IList subScorers, int minimumNrMatchers):base(null)
+               {
+                       
+                       nrScorers = subScorers.Count;
+                       
+                       if (minimumNrMatchers <= 0)
+                       {
+                               throw new System.ArgumentException("Minimum nr of matchers must be positive");
+                       }
+                       if (nrScorers <= 1)
+                       {
+                               throw new System.ArgumentException("There must be at least 2 subScorers");
+                       }
+                       
+                       this.minimumNrMatchers = minimumNrMatchers;
+                       this.subScorers = subScorers;
+                       
+                       InitScorerDocQueue();
+               }
+               
+               /// <summary>Construct a <code>DisjunctionScorer</code>, using one as the minimum number
+               /// of matching subscorers.
+               /// </summary>
+               public DisjunctionSumScorer(System.Collections.IList subScorers):this(subScorers, 1)
+               {
+               }
+               
+               /// <summary>Called the first time next() or skipTo() is called to
+               /// initialize <code>scorerDocQueue</code>.
+               /// </summary>
+               private void  InitScorerDocQueue()
+               {
+                       System.Collections.IEnumerator si = subScorers.GetEnumerator();
+                       scorerDocQueue = new ScorerDocQueue(nrScorers);
+                       while (si.MoveNext())
+                       {
+                               Scorer se = (Scorer) si.Current;
+                               if (se.NextDoc() != NO_MORE_DOCS)
+                               {
+                                       // doc() method will be used in scorerDocQueue.
+                                       scorerDocQueue.Insert(se);
+                               }
+                       }
+               }
+               
+               /// <summary>Scores and collects all matching documents.</summary>
+               /// <param name="hc">The collector to which all matching documents are passed through
+               /// {@link HitCollector#Collect(int, float)}.
+               /// <br/>When this method is used the {@link #Explain(int)} method should not be used.
+               /// </param>
+               /// <deprecated> use {@link #Score(Collector)} instead.
+               /// </deprecated>
+        [Obsolete("use Score(Collector) instead.")]
+               public override void  Score(HitCollector hc)
+               {
+                       Score(new HitCollectorWrapper(hc));
+               }
+               
+               /// <summary>Scores and collects all matching documents.</summary>
+               /// <param name="collector">The collector to which all matching documents are passed through.
+               /// <br/>When this method is used the {@link #Explain(int)} method should not be used.
+               /// </param>
+               public override void  Score(Collector collector)
+               {
+                       collector.SetScorer(this);
+                       while (NextDoc() != NO_MORE_DOCS)
+                       {
+                               collector.Collect(currentDoc);
+                       }
+               }
+               
+               /// <summary>Expert: Collects matching documents in a range.  Hook for optimization.
+               /// Note that {@link #Next()} must be called once before this method is called
+               /// for the first time.
+               /// </summary>
+               /// <param name="hc">The collector to which all matching documents are passed through
+               /// {@link HitCollector#Collect(int, float)}.
+               /// </param>
+               /// <param name="max">Do not score documents past this.
+               /// </param>
+               /// <returns> true if more matching documents may remain.
+               /// </returns>
+               /// <deprecated> use {@link #Score(Collector, int, int)} instead.
+               /// </deprecated>
+        [Obsolete("use Score(Collector, int, int) instead.")]
+               protected internal override bool Score(HitCollector hc, int max)
+               {
+                       return Score(new HitCollectorWrapper(hc), max, DocID());
+               }
+               
+               /// <summary>Expert: Collects matching documents in a range.  Hook for optimization.
+               /// Note that {@link #Next()} must be called once before this method is called
+               /// for the first time.
+               /// </summary>
+               /// <param name="collector">The collector to which all matching documents are passed through.
+               /// </param>
+               /// <param name="max">Do not score documents past this.
+               /// </param>
+               /// <returns> true if more matching documents may remain.
+               /// </returns>
+               public /*protected internal*/ override bool Score(Collector collector, int max, int firstDocID)
+               {
+                       // firstDocID is ignored since nextDoc() sets 'currentDoc'
+                       collector.SetScorer(this);
+                       while (currentDoc < max)
+                       {
+                               collector.Collect(currentDoc);
+                               if (NextDoc() == NO_MORE_DOCS)
+                               {
+                                       return false;
+                               }
+                       }
+                       return true;
+               }
+               
+               /// <deprecated> use {@link #NextDoc()} instead. 
+               /// </deprecated>
+        [Obsolete("use NextDoc() instead.")]
+               public override bool Next()
+               {
+                       return NextDoc() != NO_MORE_DOCS;
+               }
+               
+               public override int NextDoc()
+               {
+                       if (scorerDocQueue.Size() < minimumNrMatchers || !AdvanceAfterCurrent())
+                       {
+                               currentDoc = NO_MORE_DOCS;
+                       }
+                       return currentDoc;
+               }
+               
+               /// <summary>Advance all subscorers after the current document determined by the
+               /// top of the <code>scorerDocQueue</code>.
+               /// Repeat until at least the minimum number of subscorers match on the same
+               /// document and all subscorers are after that document or are exhausted.
+               /// <br/>On entry the <code>scorerDocQueue</code> has at least <code>minimumNrMatchers</code>
+               /// available. At least the scorer with the minimum document number will be advanced.
+               /// </summary>
+               /// <returns> true iff there is a match.
+               /// <br/>In case there is a match, <code>currentDoc</code>, <code>currentSumScore</code>,
+               /// and <code>nrMatchers</code> describe the match.
+               /// 
+               /// TODO: Investigate whether it is possible to use skipTo() when
+               /// the minimum number of matchers is bigger than one, ie. try and use the
+               /// character of ConjunctionScorer for the minimum number of matchers.
+               /// Also delay calling score() on the sub scorers until the minimum number of
+               /// matchers is reached.
+               /// <br/>For this, a Scorer array with minimumNrMatchers elements might
+               /// hold Scorers at currentDoc that are temporarily popped from scorerQueue.
+               /// </returns>
+               protected internal virtual bool AdvanceAfterCurrent()
+               {
+                       do 
+                       {
+                               // repeat until minimum nr of matchers
+                               currentDoc = scorerDocQueue.TopDoc();
+                               currentScore = scorerDocQueue.TopScore();
+                               nrMatchers = 1;
+                               do 
+                               {
+                                       // Until all subscorers are after currentDoc
+                                       if (!scorerDocQueue.TopNextAndAdjustElsePop())
+                                       {
+                                               if (scorerDocQueue.Size() == 0)
+                                               {
+                                                       break; // nothing more to advance, check for last match.
+                                               }
+                                       }
+                                       if (scorerDocQueue.TopDoc() != currentDoc)
+                                       {
+                                               break; // All remaining subscorers are after currentDoc.
+                                       }
+                                       currentScore += scorerDocQueue.TopScore();
+                                       nrMatchers++;
+                               }
+                               while (true);
+                               
+                               if (nrMatchers >= minimumNrMatchers)
+                               {
+                                       return true;
+                               }
+                               else if (scorerDocQueue.Size() < minimumNrMatchers)
+                               {
+                                       return false;
+                               }
+                       }
+                       while (true);
+               }
+               
+               /// <summary>Returns the score of the current document matching the query.
+               /// Initially invalid, until {@link #Next()} is called the first time.
+               /// </summary>
+               public override float Score()
+               {
+                       return currentScore;
+               }
+               
+               /// <deprecated> use {@link #DocID()} instead. 
+               /// </deprecated>
+        [Obsolete("use DocID() instead.")]
+               public override int Doc()
+               {
+                       return currentDoc;
+               }
+               
+               public override int DocID()
+               {
+                       return currentDoc;
+               }
+               
+               /// <summary>Returns the number of subscorers matching the current document.
+               /// Initially invalid, until {@link #Next()} is called the first time.
+               /// </summary>
+               public virtual int NrMatchers()
+               {
+                       return nrMatchers;
+               }
+               
+               /// <summary> Skips to the first match beyond the current whose document number is
+               /// greater than or equal to a given target. <br/>
+               /// When this method is used the {@link #Explain(int)} method should not be
+               /// used. <br/>
+               /// The implementation uses the skipTo() method on the subscorers.
+               /// 
+               /// </summary>
+               /// <param name="target">The target document number.
+               /// </param>
+               /// <returns> true iff there is such a match.
+               /// </returns>
+               /// <deprecated> use {@link #Advance(int)} instead.
+               /// </deprecated>
+        [Obsolete("use Advance(int) instead.")]
+               public override bool SkipTo(int target)
+               {
+                       return Advance(target) != NO_MORE_DOCS;
+               }
+               
+               /// <summary> Advances to the first match beyond the current whose document number is
+               /// greater than or equal to a given target. <br/>
+               /// When this method is used the {@link #Explain(int)} method should not be
+               /// used. <br/>
+               /// The implementation uses the skipTo() method on the subscorers.
+               /// 
+               /// </summary>
+               /// <param name="target">The target document number.
+               /// </param>
+		/// <returns> the document whose number is greater than or equal to the given
+		/// target, or {@link #NO_MORE_DOCS} if there is no such match.
+               /// </returns>
+               public override int Advance(int target)
+               {
+                       if (scorerDocQueue.Size() < minimumNrMatchers)
+                       {
+                               return currentDoc = NO_MORE_DOCS;
+                       }
+                       if (target <= currentDoc)
+                       {
+                               return currentDoc;
+                       }
+                       do 
+                       {
+                               if (scorerDocQueue.TopDoc() >= target)
+                               {
+					return AdvanceAfterCurrent() ? currentDoc : (currentDoc = NO_MORE_DOCS);
+                               }
+                               else if (!scorerDocQueue.TopSkipToAndAdjustElsePop(target))
+                               {
+                                       if (scorerDocQueue.Size() < minimumNrMatchers)
+                                       {
+                                               return currentDoc = NO_MORE_DOCS;
+                                       }
+                               }
+                       }
+                       while (true);
+               }
+               
+               /// <returns> An explanation for the score of a given document. 
+               /// </returns>
+               public override Explanation Explain(int doc)
+               {
+                       Explanation res = new Explanation();
+                       System.Collections.IEnumerator ssi = subScorers.GetEnumerator();
+                       float sumScore = 0.0f;
+                       int nrMatches = 0;
+                       while (ssi.MoveNext())
+                       {
+                               Explanation es = ((Scorer) ssi.Current).Explain(doc);
+                               if (es.GetValue() > 0.0f)
+                               {
+                                       // indicates match
+                                       sumScore += es.GetValue();
+                                       nrMatches++;
+                               }
+                               res.AddDetail(es);
+                       }
+                       if (nrMatchers >= minimumNrMatchers)
+                       {
+                               res.SetValue(sumScore);
+                               res.SetDescription("sum over at least " + minimumNrMatchers + " of " + subScorers.Count + ":");
+                       }
+                       else
+                       {
+                               res.SetValue(0.0f);
+                               res.SetDescription(nrMatches + " match(es) but at least " + minimumNrMatchers + " of " + subScorers.Count + " needed");
+                       }
+                       return res;
+               }
+       }
+}
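The scorer above is driven through the usual iterator contract: NextDoc()/Advance(int) position it on a match, and Score()/NrMatchers() are only valid while it sits on one. A minimal consumption sketch, assuming `scorer` is an already-constructed instance of this class (obtaining one from a Weight is elided) and that it exposes the standard NextDoc():

    int d = scorer.NextDoc();
    while (d != Mono.Lucene.Net.Search.DocIdSetIterator.NO_MORE_DOCS)
    {
        // Score() sums the matching subscorers; NrMatchers() reports how many matched.
        System.Console.WriteLine("doc {0}: score={1}, matchers={2}",
                                 d, scorer.Score(), scorer.NrMatchers());
        d = scorer.NextDoc();
    }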
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DocIdSet.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DocIdSet.cs
new file mode 100644 (file)
index 0000000..3e983d4
--- /dev/null
@@ -0,0 +1,111 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+	/// <summary> A DocIdSet contains a set of doc ids. Implementing classes need
+	/// only implement {@link #Iterator()} to provide access to the set. 
+       /// </summary>
+       [Serializable]
+       public abstract class DocIdSet
+       {
+               public class AnonymousClassDocIdSet:DocIdSet
+               {
+                       public AnonymousClassDocIdSet()
+                       {
+                               InitBlock();
+                       }
+                       public class AnonymousClassDocIdSetIterator:DocIdSetIterator
+                       {
+                               public AnonymousClassDocIdSetIterator(AnonymousClassDocIdSet enclosingInstance)
+                               {
+                                       InitBlock(enclosingInstance);
+                               }
+                               private void  InitBlock(AnonymousClassDocIdSet enclosingInstance)
+                               {
+                                       this.enclosingInstance = enclosingInstance;
+                               }
+                               private AnonymousClassDocIdSet enclosingInstance;
+                               public AnonymousClassDocIdSet Enclosing_Instance
+                               {
+                                       get
+                                       {
+                                               return enclosingInstance;
+                                       }
+                                       
+                               }
+                               public override int Advance(int target)
+                               {
+                                       return NO_MORE_DOCS;
+                               }
+                               public override int DocID()
+                               {
+                                       return NO_MORE_DOCS;
+                               }
+                               public override int NextDoc()
+                               {
+                                       return NO_MORE_DOCS;
+                               }
+                       }
+                       private void  InitBlock()
+                       {
+                               iterator = new AnonymousClassDocIdSetIterator(this);
+                       }
+                       
+                       private DocIdSetIterator iterator;
+                       
+                       public override DocIdSetIterator Iterator()
+                       {
+                               return iterator;
+                       }
+
+                       public override bool IsCacheable()
+                       {
+                               return true;
+                       }
+               }
+               
+               /// <summary>An empty {@code DocIdSet} instance for easy use, e.g. in Filters that hit no documents. </summary>
+               [NonSerialized]
+               public static readonly DocIdSet EMPTY_DOCIDSET;
+               
+		/// <summary>Provides a {@link DocIdSetIterator} to access the set.
+		/// Implementations may return <code>null</code> or
+		/// <code>{@linkplain #EMPTY_DOCIDSET}.Iterator()</code> if there
+		/// are no docs that match.
+		/// </summary>
+               public abstract DocIdSetIterator Iterator();
+
+		/// <summary>This method is a hint for {@link CachingWrapperFilter}, if this <code>DocIdSet</code>
+		/// should be cached without copying it into a BitSet. The default is to return
+		/// <code>false</code>. If you have your own <code>DocIdSet</code> implementation
+		/// that iterates very efficiently and quickly without doing disk I/O,
+		/// override this method and return true.
+		/// </summary>
+               public virtual bool IsCacheable()
+               {
+                       return false;
+               }
+               static DocIdSet()
+               {
+                       EMPTY_DOCIDSET = new AnonymousClassDocIdSet();
+               }
+       }
+}
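As a rough illustration of the contract above (implement Iterator(), optionally override IsCacheable()), the sketch below wraps a pre-sorted int[] in a hypothetical DocIdSet; it is illustrative only and not part of this commit:

    class ArrayDocIdSet : Mono.Lucene.Net.Search.DocIdSet
    {
        private readonly int[] docs; // pre-sorted, distinct doc ids

        public ArrayDocIdSet(int[] sortedDocs) { docs = sortedDocs; }

        public override Mono.Lucene.Net.Search.DocIdSetIterator Iterator()
        {
            return new ArrayIterator(docs);
        }

        // Pure in-memory iteration, so caching without a BitSet copy is safe.
        public override bool IsCacheable() { return true; }

        private class ArrayIterator : Mono.Lucene.Net.Search.DocIdSetIterator
        {
            private readonly int[] docs;
            private int i = -1;

            public ArrayIterator(int[] docs) { this.docs = docs; }

            public override int DocID()
            {
                return i < 0 ? -1 : (i < docs.Length ? docs[i] : NO_MORE_DOCS);
            }

            public override int NextDoc()
            {
                i++;
                return i < docs.Length ? docs[i] : NO_MORE_DOCS;
            }

            public override int Advance(int target)
            {
                // The linear scan documented on DocIdSetIterator.Advance below.
                int d;
                while ((d = NextDoc()) < target) { }
                return d;
            }
        }
    }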
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DocIdSetIterator.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/DocIdSetIterator.cs
new file mode 100644 (file)
index 0000000..bb998a5
--- /dev/null
@@ -0,0 +1,161 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> This abstract class defines methods to iterate over a set of non-decreasing
+       /// doc ids. Note that this class assumes it iterates on doc Ids, and therefore
+       /// {@link #NO_MORE_DOCS} is set to {@value #NO_MORE_DOCS} in order to be used as
+       /// a sentinel object. Implementations of this class are expected to consider
+       /// {@link Integer#MAX_VALUE} as an invalid value.
+       /// </summary>
+       public abstract class DocIdSetIterator
+       {
+               
+               // TODO (3.0): review the javadocs and remove any references to '3.0'.
+               private int doc = - 1;
+               
+               /// <summary> When returned by {@link #NextDoc()}, {@link #Advance(int)} and
+               /// {@link #Doc()} it means there are no more docs in the iterator.
+               /// </summary>
+               public static readonly int NO_MORE_DOCS = System.Int32.MaxValue;
+               
+		/// <summary> No longer supported. Call {@link #DocID()} instead. This method throws
+		/// {@link System.NotSupportedException} if called.
+               /// 
+               /// </summary>
+               /// <deprecated> use {@link #DocID()} instead.
+               /// </deprecated>
+        [Obsolete("use DocID() instead.")]
+               public virtual int Doc()
+               {
+                       throw new System.NotSupportedException("Call docID() instead.");
+               }
+               
+               /// <summary> Returns the following:
+               /// <ul>
+               /// <li>-1 or {@link #NO_MORE_DOCS} if {@link #NextDoc()} or
+               /// {@link #Advance(int)} were not called yet.</li>
+		/// <li>{@link #NO_MORE_DOCS} if the iterator is exhausted.</li>
+               /// <li>Otherwise it should return the doc ID it is currently on.</li>
+               /// </ul>
+               /// <p/>
+               /// <b>NOTE:</b> in 3.0, this method will become abstract.
+               /// 
+               /// </summary>
+               /// <since> 2.9
+               /// </since>
+               public virtual int DocID()
+               {
+                       return doc;
+               }
+               
+		/// <summary> No longer supported. Call {@link #NextDoc()} instead. This method throws
+		/// {@link System.NotSupportedException} if called.
+               /// 
+               /// </summary>
+               /// <deprecated> use {@link #NextDoc()} instead. This will be removed in 3.0
+               /// </deprecated>
+        [Obsolete("use NextDoc() instead. This will be removed in 3.0")]
+               public virtual bool Next()
+               {
+                       throw new System.NotSupportedException("Call nextDoc() instead.");
+               }
+               
+		/// <summary> No longer supported. Call {@link #Advance(int)} instead. This method throws
+		/// {@link System.NotSupportedException} if called.
+               /// 
+               /// </summary>
+               /// <deprecated> use {@link #Advance(int)} instead. This will be removed in 3.0
+               /// </deprecated>
+        [Obsolete("use Advance(int) instead. This will be removed in 3.0")]
+               public virtual bool SkipTo(int target)
+               {
+                       throw new System.NotSupportedException("Call advance() instead.");
+               }
+               
+               /// <summary> Advances to the next document in the set and returns the doc it is
+               /// currently on, or {@link #NO_MORE_DOCS} if there are no more docs in the
+               /// set.<br/>
+               /// 
+               /// <b>NOTE:</b> in 3.0 this method will become abstract, following the removal
+               /// of {@link #Next()}. For backward compatibility it is implemented as:
+               /// 
+               /// <pre>
+               /// public int nextDoc() throws IOException {
+               /// return next() ? doc() : NO_MORE_DOCS;
+               /// }
+               /// </pre>
+               /// 
+		/// <b>NOTE:</b> after the iterator is exhausted you should not call this
+		/// method, as it may result in unpredictable behavior.
+               /// 
+               /// </summary>
+               /// <since> 2.9
+               /// </since>
+               public virtual int NextDoc()
+               {
+			return doc = Next() ? Doc() : NO_MORE_DOCS;
+               }
+               
+		/// <summary> Advances to the first document beyond the current one whose number is greater
+               /// than or equal to <i>target</i>. Returns the current document number or
+               /// {@link #NO_MORE_DOCS} if there are no more docs in the set.
+               /// <p/>
+               /// Behaves as if written:
+               /// 
+               /// <pre>
+               /// int advance(int target) {
+               /// int doc;
+               /// while ((doc = nextDoc()) &lt; target) {
+               /// }
+               /// return doc;
+               /// }
+               /// </pre>
+               /// 
+               /// Some implementations are considerably more efficient than that.
+               /// <p/>
+		/// <b>NOTE:</b> certain implementations may return a different value (each
+               /// time) if called several times in a row with the same target.
+               /// <p/>
+               /// <b>NOTE:</b> this method may be called with {@value #NO_MORE_DOCS} for
+               /// efficiency by some Scorers. If your implementation cannot efficiently
+               /// determine that it should exhaust, it is recommended that you check for that
+               /// value in each call to this method.
+               /// <p/>
+		/// <b>NOTE:</b> after the iterator is exhausted you should not call this
+		/// method, as it may result in unpredictable behavior.
+               /// <p/>
+               /// <b>NOTE:</b> in 3.0 this method will become abstract, following the removal
+               /// of {@link #SkipTo(int)}.
+               /// 
+               /// </summary>
+               /// <since> 2.9
+               /// </since>
+               public virtual int Advance(int target)
+               {
+                       if (target == NO_MORE_DOCS)
+                       {
+                               return doc = NO_MORE_DOCS;
+                       }
+			return doc = SkipTo(target) ? Doc() : NO_MORE_DOCS;
+               }
+       }
+}
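In practice the non-deprecated surface of this class reduces to a single loop; a minimal sketch, assuming `it` is an iterator obtained elsewhere:

    // NextDoc() both advances and reports the current doc,
    // with NO_MORE_DOCS as the exhaustion sentinel.
    int doc;
    while ((doc = it.NextDoc()) != Mono.Lucene.Net.Search.DocIdSetIterator.NO_MORE_DOCS)
    {
        System.Console.WriteLine("matched doc " + doc);
    }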
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ExactPhraseScorer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ExactPhraseScorer.cs
new file mode 100644 (file)
index 0000000..f3417d9
--- /dev/null
@@ -0,0 +1,67 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Mono.Lucene.Net.Index;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       sealed class ExactPhraseScorer:PhraseScorer
+       {
+               
+               internal ExactPhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, Similarity similarity, byte[] norms):base(weight, tps, offsets, similarity, norms)
+               {
+               }
+               
+               protected internal override float PhraseFreq()
+               {
+                       // sort list with pq
+                       pq.Clear();
+                       for (PhrasePositions pp = first; pp != null; pp = pp.next)
+                       {
+                               pp.FirstPosition();
+                               pq.Put(pp); // build pq from list
+                       }
+                       PqToList(); // rebuild list from pq
+                       
+			// For counting how many times the exact phrase occurs in the current document,
+			// just count how many times all PhrasePositions have exactly the same position.
+                       int freq = 0;
+                       do 
+                       {
+                               // find position w/ all terms
+                               while (first.position < last.position)
+                               {
+                                       // scan forward in first
+                                       do 
+                                       {
+                                               if (!first.NextPosition())
+                                                       return freq;
+                                       }
+                                       while (first.position < last.position);
+                                       FirstToLast();
+                               }
+                               freq++; // all equal: a match
+                       }
+                       while (last.NextPosition());
+                       
+                       return freq;
+               }
+       }
+}
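The PhraseFreq() loop above is easier to see with the scorer machinery stripped away: each term contributes a sorted, offset-adjusted position list, and a match is counted whenever all lists align on the same position. A standalone two-term sketch of that idea (a hypothetical helper, not the class's actual data structures):

    // Counts positions at which both sorted, offset-adjusted lists agree,
    // the two-term analogue of the exact-phrase frequency count above.
    static int ExactMatches(int[] a, int[] b)
    {
        int i = 0, j = 0, freq = 0;
        while (i < a.Length && j < b.Length)
        {
            if (a[i] == b[j]) { freq++; i++; j++; }  // all terms aligned: a match
            else if (a[i] < b[j]) i++;               // scan forward in the trailing list
            else j++;
        }
        return freq;
    }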
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Explanation.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Explanation.cs
new file mode 100644 (file)
index 0000000..b0563c9
--- /dev/null
@@ -0,0 +1,176 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Expert: Describes the score computation for document and query. </summary>
+       [Serializable]
+       public class Explanation
+       {
+               private float value_Renamed; // the value of this node
+               private System.String description; // what it represents
+               private System.Collections.ArrayList details; // sub-explanations
+               
+               public Explanation()
+               {
+               }
+               
+               public Explanation(float value_Renamed, System.String description)
+               {
+                       this.value_Renamed = value_Renamed;
+                       this.description = description;
+               }
+               
+               /// <summary> Indicates whether or not this Explanation models a good match.
+               /// 
+               /// <p/>
+               /// By default, an Explanation represents a "match" if the value is positive.
+               /// <p/>
+               /// </summary>
+		/// <seealso cref="GetValue">
+               /// </seealso>
+               public virtual bool IsMatch()
+               {
+                       return (0.0f < GetValue());
+               }
+               
+               
+               
+               /// <summary>The value assigned to this explanation node. </summary>
+               public virtual float GetValue()
+               {
+                       return value_Renamed;
+               }
+               /// <summary>Sets the value assigned to this explanation node. </summary>
+               public virtual void  SetValue(float value_Renamed)
+               {
+                       this.value_Renamed = value_Renamed;
+               }
+               
+               /// <summary>A description of this explanation node. </summary>
+               public virtual System.String GetDescription()
+               {
+                       return description;
+               }
+               /// <summary>Sets the description of this explanation node. </summary>
+               public virtual void  SetDescription(System.String description)
+               {
+                       this.description = description;
+               }
+               
+               /// <summary> A short one line summary which should contain all high level
+               /// information about this Explanation, without the "Details"
+               /// </summary>
+               protected internal virtual System.String GetSummary()
+               {
+                       return GetValue() + " = " + GetDescription();
+               }
+               
+               /// <summary>The sub-nodes of this explanation node. </summary>
+               public virtual Explanation[] GetDetails()
+               {
+                       if (details == null)
+                               return null;
+                       return (Explanation[]) details.ToArray(typeof(Explanation));
+               }
+               
+               /// <summary>Adds a sub-node to this explanation node. </summary>
+               public virtual void  AddDetail(Explanation detail)
+               {
+                       if (details == null)
+                               details = new System.Collections.ArrayList();
+                       details.Add(detail);
+               }
+               
+               /// <summary>Render an explanation as text. </summary>
+               public override System.String ToString()
+               {
+                       return ToString(0);
+               }
+               public /*protected internal*/ virtual System.String ToString(int depth)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       for (int i = 0; i < depth; i++)
+                       {
+                               buffer.Append("  ");
+                       }
+                       buffer.Append(GetSummary());
+                       buffer.Append("\n");
+                       
+                       Explanation[] details = GetDetails();
+                       if (details != null)
+                       {
+                               for (int i = 0; i < details.Length; i++)
+                               {
+                                       buffer.Append(details[i].ToString(depth + 1));
+                               }
+                       }
+                       
+                       return buffer.ToString();
+               }
+               
+               
+               /// <summary>Render an explanation as HTML. </summary>
+               public virtual System.String ToHtml()
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       buffer.Append("<ul>\n");
+                       
+                       buffer.Append("<li>");
+                       buffer.Append(GetSummary());
+                       buffer.Append("<br />\n");
+                       
+                       Explanation[] details = GetDetails();
+                       if (details != null)
+                       {
+                               for (int i = 0; i < details.Length; i++)
+                               {
+                                       buffer.Append(details[i].ToHtml());
+                               }
+                       }
+                       
+                       buffer.Append("</li>\n");
+                       buffer.Append("</ul>\n");
+                       
+                       return buffer.ToString();
+               }
+               
+		/// <summary> Small utility class used to pass both an idf factor and an
+		/// explanation for that factor.
+		/// 
+		/// This class will likely be held on a {@link Weight}, so be careful
+		/// before storing any large or non-serializable fields.
+               /// 
+               /// </summary>
+               [Serializable]
+               public abstract class IDFExplanation
+               {
+                       /// <returns> the idf factor
+                       /// </returns>
+                       public abstract float GetIdf();
+                       /// <summary> This should be calculated lazily if possible.
+                       /// 
+                       /// </summary>
+                       /// <returns> the explanation for the idf factor.
+                       /// </returns>
+                       public abstract System.String Explain();
+               }
+       }
+}
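To make the rendering concrete, the sketch below assembles a small Explanation tree by hand, much as the Explain(int) implementations in this commit do, and prints it; the values and descriptions are made up, and the Mono.Lucene.Net.Search namespace qualifier is omitted:

    Explanation root = new Explanation(1.5f, "sum of:");
    root.AddDetail(new Explanation(1.0f, "termA weight"));
    root.AddDetail(new Explanation(0.5f, "termB weight"));
    System.Console.WriteLine(root.ToString());
    // Expected shape (exact float formatting may differ):
    // 1.5 = sum of:
    //   1 = termA weight
    //   0.5 = termB weight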
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ExtendedFieldCache.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ExtendedFieldCache.cs
new file mode 100644 (file)
index 0000000..341f351
--- /dev/null
@@ -0,0 +1,69 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search.ExtendedFieldCache_old
+{
+       
+       /// <summary> This interface is obsolete, use {@link FieldCache} instead.
+       /// 
+       /// </summary>
+       /// <deprecated> Use {@link FieldCache}, this will be removed in Lucene 3.0
+       /// 
+       /// </deprecated>
+    [Obsolete("Use FieldCache, this will be removed in Lucene 3.0")]
+	public struct ExtendedFieldCache_Fields
+	{
+               /// <deprecated> Use {@link FieldCache#DEFAULT}; this will be removed in Lucene 3.0 
+               /// </deprecated>
+        [Obsolete("Use FieldCache.DEFAULT; this will be removed in Lucene 3.0 ")]
+               public readonly static ExtendedFieldCache EXT_DEFAULT;
+               static ExtendedFieldCache_Fields()
+               {
+                       EXT_DEFAULT = (ExtendedFieldCache) Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT;
+               }
+       }
+       public interface ExtendedFieldCache:FieldCache
+       {
+               
+               /// <deprecated> Will be removed in 3.0, this is for binary compatibility only 
+               /// </deprecated>
+        [Obsolete("Will be removed in 3.0, this is for binary compatibility only ")]
+               new long[] GetLongs(IndexReader reader, System.String field, Mono.Lucene.Net.Search.LongParser parser);
+               
+               /// <deprecated> Will be removed in 3.0, this is for binary compatibility only 
+               /// </deprecated>
+        [Obsolete("Will be removed in 3.0, this is for binary compatibility only ")]
+               new double[] GetDoubles(IndexReader reader, System.String field, Mono.Lucene.Net.Search.DoubleParser parser);
+       }
+
+       /// <deprecated> Use {@link FieldCache.LongParser}, this will be removed in Lucene 3.0 
+       /// </deprecated>
+    [Obsolete("Use FieldCache.LongParser, this will be removed in Lucene 3.0 ")]
+       public interface LongParser:Mono.Lucene.Net.Search.LongParser
+       {
+       }
+
+       /// <deprecated> Use {@link FieldCache.DoubleParser}, this will be removed in Lucene 3.0 
+       /// </deprecated>
+    [Obsolete("Use FieldCache.DoubleParser, this will be removed in Lucene 3.0 ")]
+       public interface DoubleParser:Mono.Lucene.Net.Search.DoubleParser
+       {
+       }
+}
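Since this file is purely a deprecation shim, the migration it points at is a one-liner; a hedged sketch, where `reader` is an open IndexReader and the field name is made up:

    // Prefer the plain FieldCache entry point over the deprecated EXT_DEFAULT shim.
    long[] values = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetLongs(reader, "timestamp");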
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldCache.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldCache.cs
new file mode 100644 (file)
index 0000000..2922b56
--- /dev/null
@@ -0,0 +1,755 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NumericTokenStream = Mono.Lucene.Net.Analysis.NumericTokenStream;
+using NumericField = Mono.Lucene.Net.Documents.NumericField;
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using NumericUtils = Mono.Lucene.Net.Util.NumericUtils;
+using RamUsageEstimator = Mono.Lucene.Net.Util.RamUsageEstimator;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Expert: Maintains caches of term values.
+       /// 
+       /// <p/>Created: May 19, 2004 11:13:14 AM
+       /// 
+       /// </summary>
+       /// <since>   lucene 1.4
+       /// </since>
+       /// <version>  $Id: FieldCache.java 807841 2009-08-25 22:27:31Z markrmiller $
+       /// </version>
+       /// <seealso cref="Mono.Lucene.Net.Util.FieldCacheSanityChecker">
+       /// </seealso>
+       public sealed class CreationPlaceholder
+       {
+               internal System.Object value_Renamed;
+       }
+       /// <summary>Expert: Stores term text values and document ordering data. </summary>
+       public class StringIndex
+       {
+               
+               public virtual int BinarySearchLookup(System.String key)
+               {
+                       // this special case is the reason that Arrays.binarySearch() isn't useful.
+                       if (key == null)
+                               return 0;
+                       
+                       int low = 1;
+                       int high = lookup.Length - 1;
+                       
+                       while (low <= high)
+                       {
+                               int mid = SupportClass.Number.URShift((low + high), 1);
+                               int cmp = String.CompareOrdinal(lookup[mid], key);
+                               
+                               if (cmp < 0)
+                                       low = mid + 1;
+                               else if (cmp > 0)
+                                       high = mid - 1;
+                               else
+                                       return mid; // key found
+                       }
+                       return - (low + 1); // key not found.
+               }
+               
+               /// <summary>All the term values, in natural order. </summary>
+               public System.String[] lookup;
+               
+               /// <summary>For each document, an index into the lookup array. </summary>
+               public int[] order;
+               
+               /// <summary>Creates one of these objects </summary>
+               public StringIndex(int[] values, System.String[] lookup)
+               {
+                       this.order = values;
+                       this.lookup = lookup;
+               }
+       }
+       /// <summary> EXPERT: A unique Identifier/Description for each item in the FieldCache. 
+       /// Can be useful for logging/debugging.
+       /// <p/>
+       /// <b>EXPERIMENTAL API:</b> This API is considered extremely advanced 
+	/// and experimental. It may be removed or altered w/o warning in future
+	/// releases of Lucene.
+       /// <p/>
+       /// </summary>
+       public abstract class CacheEntry
+       {
+               public abstract System.Object GetReaderKey();
+               public abstract System.String GetFieldName();
+               public abstract System.Type GetCacheType();
+               public abstract System.Object GetCustom();
+               public abstract System.Object GetValue();
+               private System.String size = null;
+               protected internal void  SetEstimatedSize(System.String size)
+               {
+                       this.size = size;
+               }
+               /// <seealso cref="EstimateSize(RamUsageEstimator)">
+               /// </seealso>
+               public virtual void  EstimateSize()
+               {
+                       EstimateSize(new RamUsageEstimator(false)); // doesn't check for interned
+               }
+               /// <summary> Computes (and stores) the estimated size of the cache Value </summary>
+		/// <seealso cref="GetEstimatedSize">
+               /// </seealso>
+               public virtual void  EstimateSize(RamUsageEstimator ramCalc)
+               {
+                       long size = ramCalc.EstimateRamUsage(GetValue());
+            SetEstimatedSize(RamUsageEstimator.HumanReadableUnits(size, new System.Globalization.NumberFormatInfo()));  // {{Aroush-2.9}} in Java, the formatter is set to "0.#", so we need to do the same in C#
+               }
+               /// <summary> The most recently estimated size of the value, null unless 
+               /// estimateSize has been called.
+               /// </summary>
+               public System.String GetEstimatedSize()
+               {
+                       return size;
+               }
+               
+               
+               public override System.String ToString()
+               {
+                       System.Text.StringBuilder b = new System.Text.StringBuilder();
+                       b.Append("'").Append(GetReaderKey()).Append("'=>");
+                       b.Append("'").Append(GetFieldName()).Append("',");
+                       b.Append(GetCacheType()).Append(",").Append(GetCustom());
+                       b.Append("=>").Append(GetValue().GetType().FullName).Append("#");
+                       b.Append(GetValue().GetHashCode());
+                       
+                       System.String s = GetEstimatedSize();
+                       if (null != s)
+                       {
+                               b.Append(" (size =~ ").Append(s).Append(')');
+                       }
+                       
+                       return b.ToString();
+               }
+       }
+	public struct FieldCache_Fields
+	{
+               /// <summary>Indicator for StringIndex values in the cache. </summary>
+               // NOTE: the value assigned to this constant must not be
+               // the same as any of those in SortField!!
+               public readonly static int STRING_INDEX = - 1;
+               /// <summary>Expert: The cache used internally by sorting and range query classes. </summary>
+               public readonly static FieldCache DEFAULT;
+               /// <summary>The default parser for byte values, which are encoded by {@link Byte#toString(byte)} </summary>
+               public readonly static ByteParser DEFAULT_BYTE_PARSER;
+               /// <summary>The default parser for short values, which are encoded by {@link Short#toString(short)} </summary>
+               public readonly static ShortParser DEFAULT_SHORT_PARSER;
+               /// <summary>The default parser for int values, which are encoded by {@link Integer#toString(int)} </summary>
+               public readonly static IntParser DEFAULT_INT_PARSER;
+               /// <summary>The default parser for float values, which are encoded by {@link Float#toString(float)} </summary>
+               public readonly static FloatParser DEFAULT_FLOAT_PARSER;
+               /// <summary>The default parser for long values, which are encoded by {@link Long#toString(long)} </summary>
+               public readonly static LongParser DEFAULT_LONG_PARSER;
+               /// <summary>The default parser for double values, which are encoded by {@link Double#toString(double)} </summary>
+               public readonly static DoubleParser DEFAULT_DOUBLE_PARSER;
+               /// <summary> A parser instance for int values encoded by {@link NumericUtils#IntToPrefixCoded(int)}, e.g. when indexed
+               /// via {@link NumericField}/{@link NumericTokenStream}.
+               /// </summary>
+               public readonly static IntParser NUMERIC_UTILS_INT_PARSER;
+               /// <summary> A parser instance for float values encoded with {@link NumericUtils}, e.g. when indexed
+               /// via {@link NumericField}/{@link NumericTokenStream}.
+               /// </summary>
+               public readonly static FloatParser NUMERIC_UTILS_FLOAT_PARSER;
+               /// <summary> A parser instance for long values encoded by {@link NumericUtils#LongToPrefixCoded(long)}, e.g. when indexed
+               /// via {@link NumericField}/{@link NumericTokenStream}.
+               /// </summary>
+               public readonly static LongParser NUMERIC_UTILS_LONG_PARSER;
+               /// <summary> A parser instance for double values encoded with {@link NumericUtils}, e.g. when indexed
+               /// via {@link NumericField}/{@link NumericTokenStream}.
+               /// </summary>
+               public readonly static DoubleParser NUMERIC_UTILS_DOUBLE_PARSER;
+               static FieldCache_Fields()
+               {
+                       DEFAULT = new FieldCacheImpl();
+                       DEFAULT_BYTE_PARSER = new AnonymousClassByteParser();
+                       DEFAULT_SHORT_PARSER = new AnonymousClassShortParser();
+                       DEFAULT_INT_PARSER = new AnonymousClassIntParser();
+                       DEFAULT_FLOAT_PARSER = new AnonymousClassFloatParser();
+                       DEFAULT_LONG_PARSER = new AnonymousClassLongParser();
+                       DEFAULT_DOUBLE_PARSER = new AnonymousClassDoubleParser();
+                       NUMERIC_UTILS_INT_PARSER = new AnonymousClassIntParser1();
+                       NUMERIC_UTILS_FLOAT_PARSER = new AnonymousClassFloatParser1();
+                       NUMERIC_UTILS_LONG_PARSER = new AnonymousClassLongParser1();
+                       NUMERIC_UTILS_DOUBLE_PARSER = new AnonymousClassDoubleParser1();
+               }
+       }
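+	// Usage sketch (illustrative only): the parserless FieldCache getters fall
+	// back to the defaults above, and the parsers can also be invoked directly:
+	//
+	//   int n = FieldCache_Fields.DEFAULT_INT_PARSER.ParseInt("42");    // 42
+	//   int[] ids = FieldCache_Fields.DEFAULT.GetInts(reader, "docNum");
+	//
+	// where `reader` is assumed to be an open IndexReader and the field name
+	// is made up.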
+    
+       [Serializable]
+       class AnonymousClassByteParser : ByteParser
+       {
+               public virtual sbyte ParseByte(System.String value_Renamed)
+               {
+            return System.SByte.Parse(value_Renamed);
+               }
+               protected internal virtual System.Object ReadResolve()
+               {
+                       return Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT_BYTE_PARSER;
+               }
+               public override System.String ToString()
+               {
+                       return typeof(FieldCache).FullName + ".DEFAULT_BYTE_PARSER";
+               }
+       }
+       [Serializable]
+       class AnonymousClassShortParser : ShortParser
+       {
+               public virtual short ParseShort(System.String value_Renamed)
+               {
+                       return System.Int16.Parse(value_Renamed);
+               }
+               protected internal virtual System.Object ReadResolve()
+               {
+                       return Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT_SHORT_PARSER;
+               }
+               public override System.String ToString()
+               {
+                       return typeof(FieldCache).FullName + ".DEFAULT_SHORT_PARSER";
+               }
+       }
+       [Serializable]
+       class AnonymousClassIntParser : IntParser
+       {
+               public virtual int ParseInt(System.String value_Renamed)
+               {
+                       return System.Int32.Parse(value_Renamed);
+               }
+               protected internal virtual System.Object ReadResolve()
+               {
+                       return Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT_INT_PARSER;
+               }
+               public override System.String ToString()
+               {
+                       return typeof(FieldCache).FullName + ".DEFAULT_INT_PARSER";
+               }
+       }
+       [Serializable]
+       class AnonymousClassFloatParser : FloatParser
+       {
+               public virtual float ParseFloat(System.String value_Renamed)
+               {
+            try
+            {
+                return SupportClass.Single.Parse(value_Renamed);
+            }
+            catch (System.OverflowException)
+            {
+                // A negative overflowing value maps to negative infinity, a positive one to positive.
+                return value_Renamed.StartsWith("-") ? float.NegativeInfinity : float.PositiveInfinity;
+            }
+               }
+               protected internal virtual System.Object ReadResolve()
+               {
+                       return Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT_FLOAT_PARSER;
+               }
+               public override System.String ToString()
+               {
+                       return typeof(FieldCache).FullName + ".DEFAULT_FLOAT_PARSER";
+               }
+       }
+       [Serializable]
+       class AnonymousClassLongParser : LongParser
+       {
+               public virtual long ParseLong(System.String value_Renamed)
+               {
+                       return System.Int64.Parse(value_Renamed);
+               }
+               protected internal virtual System.Object ReadResolve()
+               {
+                       return Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT_LONG_PARSER;
+               }
+               public override System.String ToString()
+               {
+                       return typeof(FieldCache).FullName + ".DEFAULT_LONG_PARSER";
+               }
+       }
+       [Serializable]
+       class AnonymousClassDoubleParser : DoubleParser
+       {
+               public virtual double ParseDouble(System.String value_Renamed)
+               {
+                       return SupportClass.Double.Parse(value_Renamed);
+               }
+               protected internal virtual System.Object ReadResolve()
+               {
+                       return Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT_DOUBLE_PARSER;
+               }
+               public override System.String ToString()
+               {
+                       return typeof(FieldCache).FullName + ".DEFAULT_DOUBLE_PARSER";
+               }
+       }
+       [Serializable]
+       class AnonymousClassIntParser1 : IntParser
+       {
+               public virtual int ParseInt(System.String val)
+               {
+                       int shift = val[0] - NumericUtils.SHIFT_START_INT;
+                       if (shift > 0 && shift <= 31)
+                               throw new FieldCacheImpl.StopFillCacheException();
+                       return NumericUtils.PrefixCodedToInt(val);
+               }
+               protected internal virtual System.Object ReadResolve()
+               {
+                       return Mono.Lucene.Net.Search.FieldCache_Fields.NUMERIC_UTILS_INT_PARSER;
+               }
+               public override System.String ToString()
+               {
+                       return typeof(FieldCache).FullName + ".NUMERIC_UTILS_INT_PARSER";
+               }
+       }
+       [Serializable]
+       class AnonymousClassFloatParser1 : FloatParser
+       {
+               public virtual float ParseFloat(System.String val)
+               {
+                       int shift = val[0] - NumericUtils.SHIFT_START_INT;
+                       if (shift > 0 && shift <= 31)
+                               throw new FieldCacheImpl.StopFillCacheException();
+                       return NumericUtils.SortableIntToFloat(NumericUtils.PrefixCodedToInt(val));
+               }
+               protected internal virtual System.Object ReadResolve()
+               {
+                       return Mono.Lucene.Net.Search.FieldCache_Fields.NUMERIC_UTILS_FLOAT_PARSER;
+               }
+               public override System.String ToString()
+               {
+                       return typeof(FieldCache).FullName + ".NUMERIC_UTILS_FLOAT_PARSER";
+               }
+       }
+       [Serializable]
+       class AnonymousClassLongParser1 : LongParser
+       {
+               public virtual long ParseLong(System.String val)
+               {
+                       int shift = val[0] - NumericUtils.SHIFT_START_LONG;
+                       if (shift > 0 && shift <= 63)
+                               throw new FieldCacheImpl.StopFillCacheException();
+                       return NumericUtils.PrefixCodedToLong(val);
+               }
+               protected internal virtual System.Object ReadResolve()
+               {
+                       return Mono.Lucene.Net.Search.FieldCache_Fields.NUMERIC_UTILS_LONG_PARSER;
+               }
+               public override System.String ToString()
+               {
+                       return typeof(FieldCache).FullName + ".NUMERIC_UTILS_LONG_PARSER";
+               }
+       }
+       [Serializable]
+       class AnonymousClassDoubleParser1 : DoubleParser
+       {
+               public virtual double ParseDouble(System.String val)
+               {
+                       int shift = val[0] - NumericUtils.SHIFT_START_LONG;
+                       if (shift > 0 && shift <= 63)
+                               throw new FieldCacheImpl.StopFillCacheException();
+                       return NumericUtils.SortableLongToDouble(NumericUtils.PrefixCodedToLong(val));
+               }
+               protected internal virtual System.Object ReadResolve()
+               {
+                       return Mono.Lucene.Net.Search.FieldCache_Fields.NUMERIC_UTILS_DOUBLE_PARSER;
+               }
+               public override System.String ToString()
+               {
+                       return typeof(FieldCache).FullName + ".NUMERIC_UTILS_DOUBLE_PARSER";
+               }
+       }
+       public interface FieldCache
+       {
+               
+               /// <summary>Checks the internal cache for an appropriate entry, and if none is
+               /// found, reads the terms in <code>field</code> as a single byte and returns an array
+               /// of size <code>reader.maxDoc()</code> of the value each document
+               /// has in the given field.
+               /// </summary>
+               /// <param name="reader"> Used to get field values.
+               /// </param>
+               /// <param name="field">  Which field contains the single byte values.
+               /// </param>
+               /// <returns> The values in the given field for each document.
+               /// </returns>
+               /// <throws>  IOException  If any error occurs. </throws>
+               sbyte[] GetBytes(IndexReader reader, System.String field);
+               
+               /// <summary>Checks the internal cache for an appropriate entry, and if none is found,
+               /// reads the terms in <code>field</code> as bytes and returns an array of
+               /// size <code>reader.maxDoc()</code> of the value each document has in the
+               /// given field.
+               /// </summary>
+               /// <param name="reader"> Used to get field values.
+               /// </param>
+               /// <param name="field">  Which field contains the bytes.
+               /// </param>
+               /// <param name="parser"> Computes byte for string values.
+               /// </param>
+               /// <returns> The values in the given field for each document.
+               /// </returns>
+               /// <throws>  IOException  If any error occurs. </throws>
+               sbyte[] GetBytes(IndexReader reader, System.String field, ByteParser parser);
+               
+               /// <summary>Checks the internal cache for an appropriate entry, and if none is
+               /// found, reads the terms in <code>field</code> as shorts and returns an array
+               /// of size <code>reader.maxDoc()</code> of the value each document
+               /// has in the given field.
+               /// </summary>
+               /// <param name="reader"> Used to get field values.
+               /// </param>
+               /// <param name="field">  Which field contains the shorts.
+               /// </param>
+               /// <returns> The values in the given field for each document.
+               /// </returns>
+               /// <throws>  IOException  If any error occurs. </throws>
+               short[] GetShorts(IndexReader reader, System.String field);
+               
+               /// <summary>Checks the internal cache for an appropriate entry, and if none is found,
+               /// reads the terms in <code>field</code> as shorts and returns an array of
+               /// size <code>reader.maxDoc()</code> of the value each document has in the
+               /// given field.
+               /// </summary>
+               /// <param name="reader"> Used to get field values.
+               /// </param>
+               /// <param name="field">  Which field contains the shorts.
+               /// </param>
+               /// <param name="parser"> Computes short for string values.
+               /// </param>
+               /// <returns> The values in the given field for each document.
+               /// </returns>
+               /// <throws>  IOException  If any error occurs. </throws>
+               short[] GetShorts(IndexReader reader, System.String field, ShortParser parser);
+               
+               /// <summary>Checks the internal cache for an appropriate entry, and if none is
+               /// found, reads the terms in <code>field</code> as integers and returns an array
+               /// of size <code>reader.maxDoc()</code> of the value each document
+               /// has in the given field.
+               /// </summary>
+               /// <param name="reader"> Used to get field values.
+               /// </param>
+               /// <param name="field">  Which field contains the integers.
+               /// </param>
+               /// <returns> The values in the given field for each document.
+               /// </returns>
+               /// <throws>  IOException  If any error occurs. </throws>
+               int[] GetInts(IndexReader reader, System.String field);
+               
+               /// <summary>Checks the internal cache for an appropriate entry, and if none is found,
+               /// reads the terms in <code>field</code> as integers and returns an array of
+               /// size <code>reader.maxDoc()</code> of the value each document has in the
+               /// given field.
+               /// </summary>
+               /// <param name="reader"> Used to get field values.
+               /// </param>
+               /// <param name="field">  Which field contains the integers.
+               /// </param>
+               /// <param name="parser"> Computes integer for string values.
+               /// </param>
+               /// <returns> The values in the given field for each document.
+               /// </returns>
+               /// <throws>  IOException  If any error occurs. </throws>
+               int[] GetInts(IndexReader reader, System.String field, IntParser parser);
+               
+               /// <summary>Checks the internal cache for an appropriate entry, and if
+               /// none is found, reads the terms in <code>field</code> as floats and returns an array
+               /// of size <code>reader.maxDoc()</code> of the value each document
+               /// has in the given field.
+               /// </summary>
+               /// <param name="reader"> Used to get field values.
+               /// </param>
+               /// <param name="field">  Which field contains the floats.
+               /// </param>
+               /// <returns> The values in the given field for each document.
+               /// </returns>
+               /// <throws>  IOException  If any error occurs. </throws>
+               float[] GetFloats(IndexReader reader, System.String field);
+               
+               /// <summary>Checks the internal cache for an appropriate entry, and if
+               /// none is found, reads the terms in <code>field</code> as floats and returns an array
+               /// of size <code>reader.maxDoc()</code> of the value each document
+               /// has in the given field.
+               /// </summary>
+               /// <param name="reader"> Used to get field values.
+               /// </param>
+               /// <param name="field">  Which field contains the floats.
+               /// </param>
+               /// <param name="parser"> Computes float for string values.
+               /// </param>
+               /// <returns> The values in the given field for each document.
+               /// </returns>
+               /// <throws>  IOException  If any error occurs. </throws>
+               float[] GetFloats(IndexReader reader, System.String field, FloatParser parser);
+               
+               /// <summary> Checks the internal cache for an appropriate entry, and if none is
+               /// found, reads the terms in <code>field</code> as longs and returns an array
+               /// of size <code>reader.maxDoc()</code> of the value each document
+               /// has in the given field.
+               /// 
+               /// </summary>
+               /// <param name="reader">Used to get field values.
+               /// </param>
+               /// <param name="field"> Which field contains the longs.
+               /// </param>
+               /// <returns> The values in the given field for each document.
+               /// </returns>
+               /// <throws>  IOException  If any error occurs. </throws>
+               long[] GetLongs(IndexReader reader, System.String field);
+               
+               /// <summary> Checks the internal cache for an appropriate entry, and if none is found,
+               /// reads the terms in <code>field</code> as longs and returns an array of
+               /// size <code>reader.maxDoc()</code> of the value each document has in the
+               /// given field.
+               /// 
+               /// </summary>
+               /// <param name="reader">Used to get field values.
+               /// </param>
+               /// <param name="field"> Which field contains the longs.
+               /// </param>
+               /// <param name="parser">Computes integer for string values.
+               /// </param>
+               /// <returns> The values in the given field for each document.
+               /// </returns>
+               /// <throws>  IOException If any error occurs. </throws>
+               long[] GetLongs(IndexReader reader, System.String field, LongParser parser);
+               
+               
+               /// <summary> Checks the internal cache for an appropriate entry, and if none is
+               /// found, reads the terms in <code>field</code> as doubles and returns an array
+               /// of size <code>reader.maxDoc()</code> of the value each document
+               /// has in the given field.
+               /// 
+               /// </summary>
+               /// <param name="reader">Used to get field values.
+               /// </param>
+               /// <param name="field"> Which field contains the doubles.
+               /// </param>
+               /// <returns> The values in the given field for each document.
+               /// </returns>
+               /// <throws>  IOException If any error occurs. </throws>
+               double[] GetDoubles(IndexReader reader, System.String field);
+               
+               /// <summary> Checks the internal cache for an appropriate entry, and if none is found,
+               /// reads the terms in <code>field</code> as doubles and returns an array of
+               /// size <code>reader.maxDoc()</code> of the value each document has in the
+               /// given field.
+               /// 
+               /// </summary>
+               /// <param name="reader">Used to get field values.
+               /// </param>
+               /// <param name="field"> Which field contains the doubles.
+               /// </param>
+               /// <param name="parser">Computes integer for string values.
+               /// </param>
+               /// <returns> The values in the given field for each document.
+               /// </returns>
+               /// <throws>  IOException If any error occurs. </throws>
+               double[] GetDoubles(IndexReader reader, System.String field, DoubleParser parser);
+               
+               /// <summary>Checks the internal cache for an appropriate entry, and if none
+               /// is found, reads the term values in <code>field</code> and returns an array
+               /// of size <code>reader.maxDoc()</code> containing the value each document
+               /// has in the given field.
+               /// </summary>
+               /// <param name="reader"> Used to get field values.
+               /// </param>
+               /// <param name="field">  Which field contains the strings.
+               /// </param>
+               /// <returns> The values in the given field for each document.
+               /// </returns>
+               /// <throws>  IOException  If any error occurs. </throws>
+               System.String[] GetStrings(IndexReader reader, System.String field);
+               
+               /// <summary>Checks the internal cache for an appropriate entry, and if none
+               /// is found reads the term values in <code>field</code> and returns
+               /// an array of them in natural order, along with an array telling
+               /// which element in the term array each document uses.
+               /// </summary>
+               /// <param name="reader"> Used to get field values.
+               /// </param>
+               /// <param name="field">  Which field contains the strings.
+               /// </param>
+               /// <returns> Array of terms and index into the array for each document.
+               /// </returns>
+               /// <throws>  IOException  If any error occurs. </throws>
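+               /// <example>
+               /// A hedged usage sketch (the field name "category" and the open
+               /// <c>reader</c> are hypothetical; assumes the FieldCache_Fields.DEFAULT
+               /// singleton declared alongside this interface):
+               /// <code>
+               /// StringIndex idx = FieldCache_Fields.DEFAULT.GetStringIndex(reader, "category");
+               /// // idx.lookup holds the distinct terms in natural order;
+               /// // idx.order[doc] indexes into idx.lookup for each document.
+               /// System.String catOfDoc0 = idx.lookup[idx.order[0]];
+               /// </code>
+               /// </example>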
+               StringIndex GetStringIndex(IndexReader reader, System.String field);
+               
+               /// <summary>Checks the internal cache for an appropriate entry, and if
+               /// none is found reads <code>field</code> to see if it contains integers, longs, floats
+               /// or strings, and then calls one of the other methods in this class to get the
+               /// values.  For string values, a StringIndex is returned.  After
+               /// calling this method, there is an entry in the cache for both
+               /// type <code>AUTO</code> and the actual found type.
+               /// </summary>
+               /// <param name="reader"> Used to get field values.
+               /// </param>
+               /// <param name="field">  Which field contains the values.
+               /// </param>
+               /// <returns> int[], long[], float[] or StringIndex.
+               /// </returns>
+               /// <throws>  IOException  If any error occurs. </throws>
+               /// <deprecated> Please specify the exact type, instead.
+               /// Especially, guessing does <b>not</b> work with the new
+               /// <see cref="NumericField"/> type.
+               /// </deprecated>
+        [Obsolete("Please specify the exact type, instead. Especially, guessing does not work with the new NumericField type.")]
+               System.Object GetAuto(IndexReader reader, System.String field);
+               
+               /// <summary>Checks the internal cache for an appropriate entry, and if none
+               /// is found reads the terms out of <code>field</code> and calls the given SortComparator
+               /// to get the sort values.  A hit in the cache will happen if <code>reader</code>,
+               /// <code>field</code>, and <code>comparator</code> are the same (using <code>equals()</code>)
+               /// as a previous call to this method.
+               /// </summary>
+               /// <param name="reader"> Used to get field values.
+               /// </param>
+               /// <param name="field">  Which field contains the values.
+               /// </param>
+               /// <param name="comparator">Used to convert terms into something to sort by.
+               /// </param>
+               /// <returns> Array of sort objects, one for each document.
+               /// </returns>
+               /// <throws>  IOException  If any error occurs. </throws>
+               /// <deprecated> Please implement <see cref="FieldComparatorSource"/>
+               /// directly, instead.
+               /// </deprecated>
+        [Obsolete("Please implement FieldComparatorSource directly, instead.")]
+               System.IComparable[] GetCustom(IndexReader reader, System.String field, SortComparator comparator);
+               
+               /// <summary> EXPERT: Generates an array of CacheEntry objects representing all items 
+               /// currently in the FieldCache.
+               /// <p/>
+               /// NOTE: These CacheEntry objects maintain a strong reference to the
+               /// cached values.  Holding a reference to a CacheEntry after the
+               /// IndexReader associated with it has been garbage collected will
+               /// prevent the value itself from being garbage collected when the
+               /// cache drops its WeakReference.
+               /// <p/>
+               /// <b>EXPERIMENTAL API:</b> This API is considered extremely advanced
+               /// and experimental.  It may be removed or altered without warning
+               /// in future releases of Lucene.
+               /// <p/>
+               /// </summary>
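+               /// <example>
+               /// A hedged diagnostic sketch, assuming the FieldCache_Fields.DEFAULT
+               /// singleton declared alongside this interface:
+               /// <code>
+               /// foreach (CacheEntry e in FieldCache_Fields.DEFAULT.GetCacheEntries())
+               ///     System.Console.WriteLine(e.GetFieldName() + " => " + e.GetCacheType());
+               /// </code>
+               /// </example>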
+               CacheEntry[] GetCacheEntries();
+               
+               /// <summary> <p/>
+               /// EXPERT: Instructs the FieldCache to forcibly expunge all entries
+               /// from the underlying caches.  This is intended only for use by
+               /// test methods as a way to ensure a known base state of the cache
+               /// (without needing to rely on GC to free WeakReferences).
+               /// It should not be relied on for "cache maintenance" in general
+               /// application code.
+               /// <p/>
+               /// <b>EXPERIMENTAL API:</b> This API is considered extremely advanced
+               /// and experimental.  It may be removed or altered without warning
+               /// in future releases of Lucene.
+               /// <p/>
+               /// </summary>
+               void  PurgeAllCaches();
+
+        /// <summary>
+        /// Expert: drops all cache entries associated with this
+        /// reader.  NOTE: this reader must precisely match the
+        /// reader that the cache entry is keyed on. If you pass a
+        /// top-level reader, it usually will have no effect as
+        /// Lucene now caches at the segment reader level.
+        /// </summary>
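+        /// <example>
+        /// A hedged sketch (the segment <c>reader</c> about to be closed is
+        /// hypothetical; assumes the FieldCache_Fields.DEFAULT singleton):
+        /// <code>
+        /// FieldCache_Fields.DEFAULT.Purge(reader); // drop entries keyed on this reader
+        /// </code>
+        /// </example>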
+        void Purge(IndexReader r);
+               
+               /// <summary> If non-null, FieldCacheImpl will warn whenever
+               /// entries are created that are not sane according to
+               /// <see cref="Mono.Lucene.Net.Util.FieldCacheSanityChecker"/>.
+               /// </summary>
+               void  SetInfoStream(System.IO.StreamWriter stream);
+               
+               /// <summary>Counterpart of <see cref="SetInfoStream(System.IO.StreamWriter)"/>. </summary>
+               System.IO.StreamWriter GetInfoStream();
+       }
+       
+       /// <summary> Marker interface as super-interface to all parsers. It
+       /// is used to specify a custom parser to the
+       /// <see cref="SortField"/> constructor taking (String, FieldCache.Parser).
+       /// </summary>
+       public interface Parser
+       {
+       }
+       
+       /// <summary>Interface to parse bytes from document fields.</summary>
+       /// <seealso cref="FieldCache.GetBytes(IndexReader, String, FieldCache.ByteParser)">
+       /// </seealso>
+       public interface ByteParser:Parser
+       {
+               /// <summary>Return a single sbyte representation of this field's value. </summary>
+               sbyte ParseByte(System.String string_Renamed);
+       }
+       
+       /// <summary>Interface to parse shorts from document fields.</summary>
+       /// <seealso cref="FieldCache.GetShorts(IndexReader, String, FieldCache.ShortParser)">
+       /// </seealso>
+       public interface ShortParser:Parser
+       {
+               /// <summary>Return a short representation of this field's value. </summary>
+               short ParseShort(System.String string_Renamed);
+       }
+       
+       /// <summary>Interface to parse ints from document fields.</summary>
+       /// <seealso cref="FieldCache.GetInts(IndexReader, String, FieldCache.IntParser)">
+       /// </seealso>
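+       /// <example>
+       /// A hedged sketch of a custom parser; the class name and the leading
+       /// currency symbol it strips are purely illustrative:
+       /// <code>
+       /// class CurrencyIntParser : IntParser
+       /// {
+       ///     public int ParseInt(System.String s)
+       ///     {
+       ///         return System.Int32.Parse(s.TrimStart('$'));
+       ///     }
+       /// }
+       /// </code>
+       /// </example>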
+       public interface IntParser:Parser
+       {
+               /// <summary>Return an integer representation of this field's value. </summary>
+               int ParseInt(System.String string_Renamed);
+       }
+       
+       /// <summary>Interface to parse floats from document fields.</summary>
+       /// <seealso cref="FieldCache.GetFloats(IndexReader, String, FieldCache.FloatParser)">
+       /// </seealso>
+       public interface FloatParser:Parser
+       {
+               /// <summary>Return a float representation of this field's value. </summary>
+               float ParseFloat(System.String string_Renamed);
+       }
+       
+       /// <summary>Interface to parse longs from document fields.</summary>
+       /// <seealso cref="FieldCache.GetLongs(IndexReader, String, FieldCache.LongParser)">
+       /// </seealso>
+       /// <deprecated> Use <see cref="FieldCache.LongParser"/>; this will be removed in Lucene 3.0.
+       /// </deprecated>
+    [Obsolete("Use FieldCache.LongParser, this will be removed in Lucene 3.0")]
+       public interface LongParser:Parser
+       {
+               /// <summary>Return a long representation of this field's value. </summary>
+               long ParseLong(System.String string_Renamed);
+       }
+       
+       /// <summary>Interface to parse doubles from document fields.</summary>
+       /// <seealso cref="FieldCache.GetDoubles(IndexReader, String, FieldCache.DoubleParser)">
+       /// </seealso>
+       /// <deprecated> Use <see cref="FieldCache.DoubleParser"/>; this will be removed in Lucene 3.0.
+       /// </deprecated>
+    [Obsolete("Use FieldCache.DoubleParser, this will be removed in Lucene 3.0 ")]
+       public interface DoubleParser:Parser
+       {
+               /// <summary>Return a double representation of this field's value. </summary>
+               double ParseDouble(System.String string_Renamed);
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldCacheImpl.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldCacheImpl.cs
new file mode 100644 (file)
index 0000000..48e964a
--- /dev/null
@@ -0,0 +1,1073 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NumericField = Mono.Lucene.Net.Documents.NumericField;
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using TermDocs = Mono.Lucene.Net.Index.TermDocs;
+using TermEnum = Mono.Lucene.Net.Index.TermEnum;
+using FieldCacheSanityChecker = Mono.Lucene.Net.Util.FieldCacheSanityChecker;
+using StringHelper = Mono.Lucene.Net.Util.StringHelper;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Expert: The default cache implementation, storing all values in memory.
+       /// A WeakHashMap is used for storage.
+       /// 
+       /// <p/>Created: May 19, 2004 4:40:36 PM
+       /// 
+       /// </summary>
+       /// <since>   lucene 1.4
+       /// </since>
+       /// <version>  $Id: FieldCacheImpl.java 807572 2009-08-25 11:44:45Z mikemccand $
+       /// </version>
+       // TODO: change interface to FieldCache in 3.0 when removed
+       class FieldCacheImpl : ExtendedFieldCache_old.ExtendedFieldCache
+       {
+               
+               private System.Collections.IDictionary caches;
+               internal FieldCacheImpl()
+               {
+                       Init();
+               }
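+               // Init builds one sub-cache per supported value type, keyed by
+               // System.Type; PurgeAllCaches simply calls Init() again, dropping
+               // every previous sub-cache for the GC to reclaim.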
+               private void  Init()
+               {
+                       lock (this)
+                       {
+                System.Collections.Hashtable caches2 = new System.Collections.Hashtable(7);
+                caches2[System.Type.GetType("System.SByte")] = new ByteCache(this);
+                caches2[System.Type.GetType("System.Int16")] = new ShortCache(this);
+                caches2[System.Type.GetType("System.Int32")] = new IntCache(this);
+                caches2[System.Type.GetType("System.Single")] = new FloatCache(this);
+                caches2[System.Type.GetType("System.Int64")] = new LongCache(this);
+                caches2[System.Type.GetType("System.Double")] = new DoubleCache(this);
+                caches2[typeof(System.String)] = new StringCache(this);
+                caches2[typeof(StringIndex)] = new StringIndexCache(this);
+                caches2[typeof(System.IComparable)] = new CustomCache(this);
+                caches2[typeof(System.Object)] = new AutoCache(this);
+                caches = caches2;
+                       }
+               }
+               
+               public virtual void  PurgeAllCaches()
+               {
+                       Init();
+               }
+
+        public void Purge(IndexReader r)
+        {
+            foreach (Cache c in caches.Values)
+            {
+                c.Purge(r);
+            }
+        }
+               
+               public virtual CacheEntry[] GetCacheEntries()
+               {
+                       System.Collections.IList result = new System.Collections.ArrayList(17);
+                       System.Collections.IEnumerator outerKeys = caches.Keys.GetEnumerator();
+                       while (outerKeys.MoveNext())
+                       {
+                               System.Type cacheType = (System.Type) outerKeys.Current;
+                               Cache cache = (Cache) caches[cacheType];
+                               System.Collections.IEnumerator innerKeys = cache.readerCache.Keys.GetEnumerator();
+                               while (innerKeys.MoveNext())
+                               {
+                                       // we've now materialized a hard ref
+                                       System.Object readerKey = innerKeys.Current;
+                                       // innerKeys was backed by WeakHashMap, sanity check
+                                       // that it wasn't GCed before we made hard ref
+                                       if (null != readerKey && cache.readerCache.Contains(readerKey))
+                                       {
+                                               System.Collections.IDictionary innerCache = ((System.Collections.IDictionary) cache.readerCache[readerKey]);
+                                               System.Collections.IEnumerator entrySetIterator = new System.Collections.Hashtable(innerCache).GetEnumerator();
+                                               while (entrySetIterator.MoveNext())
+                                               {
+                                                       System.Collections.DictionaryEntry mapEntry = (System.Collections.DictionaryEntry) entrySetIterator.Current;
+                                                       Entry entry = (Entry) mapEntry.Key;
+                                                       result.Add(new CacheEntryImpl(readerKey, entry.field, cacheType, entry.type, entry.custom, entry.locale, mapEntry.Value));
+                                               }
+                                       }
+                               }
+                       }
+                       return (CacheEntry[]) new System.Collections.ArrayList(result).ToArray(typeof(CacheEntry));
+               }
+               
+               private sealed class CacheEntryImpl:CacheEntry
+               {
+                       /// <deprecated> Only needed because of Entry (ab)use by 
+                       /// FieldSortedHitQueue, remove when FieldSortedHitQueue 
+                       /// is removed
+                       /// </deprecated>
+            [Obsolete("Only needed because of Entry (ab)use by FieldSortedHitQueue, remove when FieldSortedHitQueue is removed")]
+                       private int sortFieldType;
+                       /// <deprecated> Only needed because of Entry (ab)use by 
+                       /// FieldSortedHitQueue, remove when FieldSortedHitQueue 
+                       /// is removed
+                       /// </deprecated>
+            [Obsolete("Only needed because of Entry (ab)use by FieldSortedHitQueue, remove when FieldSortedHitQueue is removed")]
+                       private System.Globalization.CultureInfo locale;
+                       
+                       private System.Object readerKey;
+                       private System.String fieldName;
+                       private System.Type cacheType;
+                       private System.Object custom;
+                       private System.Object value_Renamed;
+                       internal CacheEntryImpl(System.Object readerKey, System.String fieldName, System.Type cacheType, int sortFieldType, System.Object custom, System.Globalization.CultureInfo locale, System.Object value_Renamed)
+                       {
+                               this.readerKey = readerKey;
+                               this.fieldName = fieldName;
+                               this.cacheType = cacheType;
+                               this.sortFieldType = sortFieldType;
+                               this.custom = custom;
+                               this.locale = locale;
+                               this.value_Renamed = value_Renamed;
+                               
+                               // :HACK: for testing.
+                               //         if (null != locale || SortField.CUSTOM != sortFieldType) {
+                               //           throw new RuntimeException("Locale/sortFieldType: " + this);
+                               //         }
+                       }
+                       public override System.Object GetReaderKey()
+                       {
+                               return readerKey;
+                       }
+                       public override System.String GetFieldName()
+                       {
+                               return fieldName;
+                       }
+                       public override System.Type GetCacheType()
+                       {
+                               return cacheType;
+                       }
+                       public override System.Object GetCustom()
+                       {
+                               return custom;
+                       }
+                       public override System.Object GetValue()
+                       {
+                               return value_Renamed;
+                       }
+                       /// <summary> Adds a warning to base.ToString() if locale or sortFieldType was specified.</summary>
+                       /// <deprecated> Only needed because of Entry (ab)use by 
+                       /// FieldSortedHitQueue, remove when FieldSortedHitQueue 
+                       /// is removed
+                       /// </deprecated>
+            [Obsolete("Only needed because of Entry (ab)use by FieldSortedHitQueue, remove when FieldSortedHitQueue is removed")]
+                       public override System.String ToString()
+                       {
+                               System.String r = base.ToString();
+                               if (null != locale)
+                               {
+                                       r = r + "...!!!Locale:" + locale + "???";
+                               }
+                               if (SortField.CUSTOM != sortFieldType)
+                               {
+                                       r = r + "...!!!SortType:" + sortFieldType + "???";
+                               }
+                               return r;
+                       }
+               }
+               
+               /// <summary> Hack: When thrown from a Parser (NUMERIC_UTILS_* ones), this stops
+               /// processing terms and returns the current FieldCache
+               /// array.
+               /// </summary>
+               [Serializable]
+               internal sealed class StopFillCacheException:System.SystemException
+               {
+               }
+               
+               /// <summary>Expert: Internal cache. </summary>
+               internal abstract class Cache
+               {
+                       internal Cache()
+                       {
+                               this.wrapper = null;
+                       }
+                       
+                       internal Cache(FieldCache wrapper)
+                       {
+                               this.wrapper = wrapper;
+                       }
+                       
+                       internal FieldCache wrapper;
+
+            internal System.Collections.IDictionary readerCache = new SupportClass.WeakHashTable();
+                       
+                       protected internal abstract System.Object CreateValue(IndexReader reader, Entry key);
+
+            /// <summary>Remove this reader from the cache, if present.</summary>
+            public void Purge(IndexReader r)
+            {
+                object readerKey = r.GetFieldCacheKey();
+                lock (readerCache)
+                {
+                    readerCache.Remove(readerKey);
+                }
+            }
+                       
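+                       // Get is double-checked: under the readerCache lock we find or
+                       // install a CreationPlaceholder for the key, then build the real
+                       // value while holding only the placeholder's lock, so lookups of
+                       // other keys are not blocked during the slow CreateValue pass.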
+                       public virtual System.Object Get(IndexReader reader, Entry key)
+                       {
+                               System.Collections.IDictionary innerCache;
+                               System.Object value_Renamed;
+                               System.Object readerKey = reader.GetFieldCacheKey();
+                               lock (readerCache.SyncRoot)
+                               {
+                                       innerCache = (System.Collections.IDictionary) readerCache[readerKey];
+                                       if (innerCache == null)
+                                       {
+                                               innerCache = new System.Collections.Hashtable();
+                                               readerCache[readerKey] = innerCache;
+                                               value_Renamed = null;
+                                       }
+                                       else
+                                       {
+                                               value_Renamed = innerCache[key];
+                                       }
+                                       if (value_Renamed == null)
+                                       {
+                                               value_Renamed = new CreationPlaceholder();
+                                               innerCache[key] = value_Renamed;
+                                       }
+                               }
+                               if (value_Renamed is CreationPlaceholder)
+                               {
+                                       lock (value_Renamed)
+                                       {
+                                               CreationPlaceholder progress = (CreationPlaceholder) value_Renamed;
+                                               if (progress.value_Renamed == null)
+                                               {
+                                                       progress.value_Renamed = CreateValue(reader, key);
+                                                       lock (readerCache.SyncRoot)
+                                                       {
+                                                               innerCache[key] = progress.value_Renamed;
+                                                       }
+                                                       
+                                                       // Only check if key.custom (the parser) is
+                                                       // non-null; else, we check twice for a single
+                                                       // call to FieldCache.getXXX
+                                                       if (key.custom != null && wrapper != null)
+                                                       {
+                                                               System.IO.StreamWriter infoStream = wrapper.GetInfoStream();
+                                                               if (infoStream != null)
+                                                               {
+                                                                       PrintNewInsanity(infoStream, progress.value_Renamed);
+                                                               }
+                                                       }
+                                               }
+                                               return progress.value_Renamed;
+                                       }
+                               }
+                               return value_Renamed;
+                       }
+                       
+                       private void  PrintNewInsanity(System.IO.StreamWriter infoStream, System.Object value_Renamed)
+                       {
+                               FieldCacheSanityChecker.Insanity[] insanities = FieldCacheSanityChecker.CheckSanity(wrapper);
+                               for (int i = 0; i < insanities.Length; i++)
+                               {
+                                       FieldCacheSanityChecker.Insanity insanity = insanities[i];
+                                       CacheEntry[] entries = insanity.GetCacheEntries();
+                                       for (int j = 0; j < entries.Length; j++)
+                                       {
+                                               if (entries[j].GetValue() == value_Renamed)
+                                               {
+                                                       // OK this insanity involves our entry
+                                                       infoStream.WriteLine("WARNING: new FieldCache insanity created\nDetails: " + insanity.ToString());
+                                                       infoStream.WriteLine("\nStack:\n");
+                            // Approximate the Java port's stack dump with the current
+                            // managed stack trace (new Exception() carries no stack
+                            // unless thrown).
+                            infoStream.WriteLine(System.Environment.StackTrace);
+                                                       break;
+                                               }
+                                       }
+                               }
+                       }
+               }
+               
+               /// <summary>Expert: Every composite-key in the internal cache is of this type. </summary>
+               protected internal class Entry
+               {
+                       internal System.String field; // which Fieldable
+                       /// <deprecated> Only (ab)used by FieldSortedHitQueue, 
+                       /// remove when FieldSortedHitQueue is removed
+                       /// </deprecated>
+            [Obsolete("Only (ab)used by FieldSortedHitQueue, remove when FieldSortedHitQueue is removed")]
+                       internal int type; // which SortField type
+                       internal System.Object custom; // which custom comparator or parser
+                       /// <deprecated> Only (ab)used by FieldSortedHitQueue, 
+                       /// remove when FieldSortedHitQueue is removed
+                       /// </deprecated>
+            [Obsolete("Only (ab)used by FieldSortedHitQueue, remove when FieldSortedHitQueue is removed")]
+                       internal System.Globalization.CultureInfo locale; // the locale we're sorting (if string)
+                       
+                       /// <deprecated> Only (ab)used by FieldSortedHitQueue, 
+                       /// remove when FieldSortedHitQueue is removed
+                       /// </deprecated>
+            [Obsolete("Only (ab)used by FieldSortedHitQueue, remove when FieldSortedHitQueue is removed")]
+                       internal Entry(System.String field, int type, System.Globalization.CultureInfo locale)
+                       {
+                               this.field = StringHelper.Intern(field);
+                               this.type = type;
+                               this.custom = null;
+                               this.locale = locale;
+                       }
+                       
+                       /// <summary>Creates one of these objects for a custom comparator/parser. </summary>
+                       internal Entry(System.String field, System.Object custom)
+                       {
+                               this.field = StringHelper.Intern(field);
+                               this.type = SortField.CUSTOM;
+                               this.custom = custom;
+                               this.locale = null;
+                       }
+                       
+                       /// <deprecated> Only (ab)used by FieldSortedHitQueue, 
+                       /// remove when FieldSortedHitQueue is removed
+                       /// </deprecated>
+            [Obsolete("Only (ab)used by FieldSortedHitQueue, remove when FieldSortedHitQueue is removed")]
+                       internal Entry(System.String field, int type, Parser parser)
+                       {
+                               this.field = StringHelper.Intern(field);
+                               this.type = type;
+                               this.custom = parser;
+                               this.locale = null;
+                       }
+                       
+                       /// <summary>Two of these are equal iff they reference the same field and type, and their locale and custom comparator/parser are equal. </summary>
+                       public  override bool Equals(System.Object o)
+                       {
+                               if (o is Entry)
+                               {
+                                       Entry other = (Entry) o;
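+                                       // field was interned via StringHelper.Intern, so a
+                                       // reference comparison is sufficient here.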
+                                       if ((System.Object) other.field == (System.Object) field && other.type == type)
+                                       {
+                                               if (other.locale == null?locale == null:other.locale.Equals(locale))
+                                               {
+                                                       if (other.custom == null)
+                                                       {
+                                                               if (custom == null)
+                                                                       return true;
+                                                       }
+                                                       else if (other.custom.Equals(custom))
+                                                       {
+                                                               return true;
+                                                       }
+                                               }
+                                       }
+                               }
+                               return false;
+                       }
+                       
+                       /// <summary>Composes a hashcode from the field, type, custom object, and locale. </summary>
+                       public override int GetHashCode()
+                       {
+                               return field.GetHashCode() ^ type ^ (custom == null?0:custom.GetHashCode()) ^ (locale == null?0:locale.GetHashCode());
+                       }
+               }
+               
+               // inherit javadocs
+               public virtual sbyte[] GetBytes(IndexReader reader, System.String field)
+               {
+                       return GetBytes(reader, field, null);
+               }
+               
+               // inherit javadocs
+               public virtual sbyte[] GetBytes(IndexReader reader, System.String field, ByteParser parser)
+               {
+                       return (sbyte[]) ((Cache) caches[System.Type.GetType("System.SByte")]).Get(reader, new Entry(field, parser));
+               }
+               
+               internal sealed class ByteCache:Cache
+               {
+                       internal ByteCache(FieldCache wrapper):base(wrapper)
+                       {
+                       }
+                       protected internal override System.Object CreateValue(IndexReader reader, Entry entryKey)
+                       {
+                               Entry entry = (Entry) entryKey;
+                               System.String field = entry.field;
+                               ByteParser parser = (ByteParser) entry.custom;
+                               if (parser == null)
+                               {
+                                       return wrapper.GetBytes(reader, field, Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT_BYTE_PARSER);
+                               }
+                               sbyte[] retArray = new sbyte[reader.MaxDoc()];
+                               TermDocs termDocs = reader.TermDocs();
+                               TermEnum termEnum = reader.Terms(new Term(field));
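+                               // Uninvert the field: walk every term in it, parse each term
+                               // once, then stamp the parsed value into the slot of every
+                               // document that contains that term.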
+                               try
+                               {
+                                       do 
+                                       {
+                                               Term term = termEnum.Term();
+                                               if (term == null || (System.Object) term.Field() != (System.Object) field)
+                                                       break;
+                                               sbyte termval = parser.ParseByte(term.Text());
+                                               termDocs.Seek(termEnum);
+                                               while (termDocs.Next())
+                                               {
+                                                       retArray[termDocs.Doc()] = termval;
+                                               }
+                                       }
+                                       while (termEnum.Next());
+                               }
+                               catch (StopFillCacheException)
+                               {
+                                       // A parser signalled that the remaining terms are not
+                                       // valid values; return what has been filled so far.
+                               }
+                               finally
+                               {
+                                       termDocs.Close();
+                                       termEnum.Close();
+                               }
+                               return retArray;
+                       }
+               }
+               
+               
+               // inherit javadocs
+               public virtual short[] GetShorts(IndexReader reader, System.String field)
+               {
+                       return GetShorts(reader, field, null);
+               }
+               
+               // inherit javadocs
+               public virtual short[] GetShorts(IndexReader reader, System.String field, ShortParser parser)
+               {
+                       return (short[]) ((Cache) caches[System.Type.GetType("System.Int16")]).Get(reader, new Entry(field, parser));
+               }
+               
+               internal sealed class ShortCache:Cache
+               {
+                       internal ShortCache(FieldCache wrapper):base(wrapper)
+                       {
+                       }
+                       
+                       protected internal override System.Object CreateValue(IndexReader reader, Entry entryKey)
+                       {
+                               Entry entry = (Entry) entryKey;
+                               System.String field = entry.field;
+                               ShortParser parser = (ShortParser) entry.custom;
+                               if (parser == null)
+                               {
+                                       return wrapper.GetShorts(reader, field, Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT_SHORT_PARSER);
+                               }
+                               short[] retArray = new short[reader.MaxDoc()];
+                               TermDocs termDocs = reader.TermDocs();
+                               TermEnum termEnum = reader.Terms(new Term(field));
+                               try
+                               {
+                                       do 
+                                       {
+                                               Term term = termEnum.Term();
+                                               if (term == null || (System.Object) term.Field() != (System.Object) field)
+                                                       break;
+                                               short termval = parser.ParseShort(term.Text());
+                                               termDocs.Seek(termEnum);
+                                               while (termDocs.Next())
+                                               {
+                                                       retArray[termDocs.Doc()] = termval;
+                                               }
+                                       }
+                                       while (termEnum.Next());
+                               }
+                               catch (StopFillCacheException)
+                               {
+                                       // A parser signalled that the remaining terms are not
+                                       // valid values; return what has been filled so far.
+                               }
+                               finally
+                               {
+                                       termDocs.Close();
+                                       termEnum.Close();
+                               }
+                               return retArray;
+                       }
+               }
+               
+               
+               // inherit javadocs
+               public virtual int[] GetInts(IndexReader reader, System.String field)
+               {
+                       return GetInts(reader, field, null);
+               }
+               
+               // inherit javadocs
+               public virtual int[] GetInts(IndexReader reader, System.String field, IntParser parser)
+               {
+                       return (int[]) ((Cache) caches[System.Type.GetType("System.Int32")]).Get(reader, new Entry(field, parser));
+               }
+               
+               internal sealed class IntCache:Cache
+               {
+                       internal IntCache(FieldCache wrapper):base(wrapper)
+                       {
+                       }
+                       
+                       protected internal override System.Object CreateValue(IndexReader reader, Entry entryKey)
+                       {
+                               Entry entry = (Entry) entryKey;
+                               System.String field = entry.field;
+                               IntParser parser = (IntParser) entry.custom;
+                               if (parser == null)
+                               {
+                                       try
+                                       {
+                                               return wrapper.GetInts(reader, field, Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT_INT_PARSER);
+                                       }
+                                       catch (System.FormatException)
+                                       {
+                                               // Terms are not plain integers; retry with the
+                                               // NumericField (trie-encoded) parser.
+                                               return wrapper.GetInts(reader, field, Mono.Lucene.Net.Search.FieldCache_Fields.NUMERIC_UTILS_INT_PARSER);
+                                       }
+                               }
+                               int[] retArray = null;
+                               TermDocs termDocs = reader.TermDocs();
+                               TermEnum termEnum = reader.Terms(new Term(field));
+                               try
+                               {
+                                       do 
+                                       {
+                                               Term term = termEnum.Term();
+                                               if (term == null || (System.Object) term.Field() != (System.Object) field)
+                                                       break;
+                                               int termval = parser.ParseInt(term.Text());
+                                               if (retArray == null)
+                                               // late init
+                                                       retArray = new int[reader.MaxDoc()];
+                                               termDocs.Seek(termEnum);
+                                               while (termDocs.Next())
+                                               {
+                                                       retArray[termDocs.Doc()] = termval;
+                                               }
+                                       }
+                                       while (termEnum.Next());
+                               }
+                               catch (StopFillCacheException)
+                               {
+                                       // A parser signalled that the remaining terms are not
+                                       // valid values; return what has been filled so far.
+                               }
+                               finally
+                               {
+                                       termDocs.Close();
+                                       termEnum.Close();
+                               }
+                               if (retArray == null)
+                               // no values
+                                       retArray = new int[reader.MaxDoc()];
+                               return retArray;
+                       }
+               }
+               
+               
+               
+               // inherit javadocs
+               public virtual float[] GetFloats(IndexReader reader, System.String field)
+               {
+                       return GetFloats(reader, field, null);
+               }
+               
+               // inherit javadocs
+               public virtual float[] GetFloats(IndexReader reader, System.String field, FloatParser parser)
+               {
+                       
+                       return (float[]) ((Cache) caches[System.Type.GetType("System.Single")]).Get(reader, new Entry(field, parser));
+               }
+               
+               internal sealed class FloatCache:Cache
+               {
+                       internal FloatCache(FieldCache wrapper):base(wrapper)
+                       {
+                       }
+                       
+                       protected internal override System.Object CreateValue(IndexReader reader, Entry entryKey)
+                       {
+                               Entry entry = (Entry) entryKey;
+                               System.String field = entry.field;
+                               FloatParser parser = (FloatParser) entry.custom;
+                               if (parser == null)
+                               {
+                                       try
+                                       {
+                                               return wrapper.GetFloats(reader, field, Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT_FLOAT_PARSER);
+                                       }
+                                       catch (System.FormatException)
+                                       {
+                                               // Terms are not plain floats; retry with the
+                                               // NumericField (trie-encoded) parser.
+                                               return wrapper.GetFloats(reader, field, Mono.Lucene.Net.Search.FieldCache_Fields.NUMERIC_UTILS_FLOAT_PARSER);
+                                       }
+                               }
+                               float[] retArray = null;
+                               TermDocs termDocs = reader.TermDocs();
+                               TermEnum termEnum = reader.Terms(new Term(field));
+                               try
+                               {
+                                       do 
+                                       {
+                                               Term term = termEnum.Term();
+                                               if (term == null || (System.Object) term.Field() != (System.Object) field)
+                                                       break;
+                                               float termval = parser.ParseFloat(term.Text());
+                                               if (retArray == null)
+                                               // late init
+                                                       retArray = new float[reader.MaxDoc()];
+                                               termDocs.Seek(termEnum);
+                                               while (termDocs.Next())
+                                               {
+                                                       retArray[termDocs.Doc()] = termval;
+                                               }
+                                       }
+                                       while (termEnum.Next());
+                               }
+                               catch (StopFillCacheException)
+                               {
+                                       // A parser signalled that the remaining terms are not
+                                       // valid values; return what has been filled so far.
+                               }
+                               finally
+                               {
+                                       termDocs.Close();
+                                       termEnum.Close();
+                               }
+                               if (retArray == null)
+                               // no values
+                                       retArray = new float[reader.MaxDoc()];
+                               return retArray;
+                       }
+               }
+               
+               
+               
+               public virtual long[] GetLongs(IndexReader reader, System.String field)
+               {
+                       return GetLongs(reader, field, null);
+               }
+               
+               // inherit javadocs
+               public virtual long[] GetLongs(IndexReader reader, System.String field, Mono.Lucene.Net.Search.LongParser parser)
+               {
+                       return (long[]) ((Cache) caches[System.Type.GetType("System.Int64")]).Get(reader, new Entry(field, parser));
+               }
+               
+               /// <deprecated> Will be removed in 3.0, this is for binary compatibility only 
+               /// </deprecated>
+        [Obsolete("Will be removed in 3.0, this is for binary compatibility only ")]
+               public virtual long[] GetLongs(IndexReader reader, System.String field, Mono.Lucene.Net.Search.ExtendedFieldCache_old.LongParser parser)
+               {
+                       return (long[]) ((Cache) caches[System.Type.GetType("System.Int64")]).Get(reader, new Entry(field, parser));
+               }
+               
+               internal sealed class LongCache:Cache
+               {
+                       internal LongCache(FieldCache wrapper):base(wrapper)
+                       {
+                       }
+                       
+                       protected internal override System.Object CreateValue(IndexReader reader, Entry entryKey)
+                       {
+                               Entry entry = (Entry) entryKey;
+                               System.String field = entry.field;
+                               Mono.Lucene.Net.Search.LongParser parser = (Mono.Lucene.Net.Search.LongParser) entry.custom;
+                               if (parser == null)
+                               {
+                                       try
+                                       {
+                                               return wrapper.GetLongs(reader, field, Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT_LONG_PARSER);
+                                       }
+                                       catch (System.FormatException)
+                                       {
+                                               // Terms are not plain longs; retry with the
+                                               // NumericField (trie-encoded) parser.
+                                               return wrapper.GetLongs(reader, field, Mono.Lucene.Net.Search.FieldCache_Fields.NUMERIC_UTILS_LONG_PARSER);
+                                       }
+                               }
+                               long[] retArray = null;
+                               TermDocs termDocs = reader.TermDocs();
+                               TermEnum termEnum = reader.Terms(new Term(field));
+                               try
+                               {
+                                       do 
+                                       {
+                                               Term term = termEnum.Term();
+                                               if (term == null || (System.Object) term.Field() != (System.Object) field)
+                                                       break;
+                                               long termval = parser.ParseLong(term.Text());
+                                               if (retArray == null)
+                                               // late init
+                                                       retArray = new long[reader.MaxDoc()];
+                                               termDocs.Seek(termEnum);
+                                               while (termDocs.Next())
+                                               {
+                                                       retArray[termDocs.Doc()] = termval;
+                                               }
+                                       }
+                                       while (termEnum.Next());
+                               }
+                               catch (StopFillCacheException)
+                               {
+                                       // A parser signalled that the remaining terms are not
+                                       // valid values; return what has been filled so far.
+                               }
+                               finally
+                               {
+                                       termDocs.Close();
+                                       termEnum.Close();
+                               }
+                               if (retArray == null)
+                               // no values
+                                       retArray = new long[reader.MaxDoc()];
+                               return retArray;
+                       }
+               }
+               
+               
+               // inherit javadocs
+               public virtual double[] GetDoubles(IndexReader reader, System.String field)
+               {
+                       return GetDoubles(reader, field, null);
+               }
+               
+               // inherit javadocs
+               public virtual double[] GetDoubles(IndexReader reader, System.String field, Mono.Lucene.Net.Search.DoubleParser parser)
+               {
+                       return (double[]) ((Cache) caches[System.Type.GetType("System.Double")]).Get(reader, new Entry(field, parser));
+               }
+               
+               /// <deprecated> Will be removed in 3.0, this is for binary compatibility only 
+               /// </deprecated>
+        [Obsolete("Will be removed in 3.0, this is for binary compatibility only ")]
+               public virtual double[] GetDoubles(IndexReader reader, System.String field, Mono.Lucene.Net.Search.ExtendedFieldCache_old.DoubleParser parser)
+               {
+                       return (double[]) ((Cache) caches[System.Type.GetType("System.Double")]).Get(reader, new Entry(field, parser));
+               }
+               
+               internal sealed class DoubleCache:Cache
+               {
+                       internal DoubleCache(FieldCache wrapper):base(wrapper)
+                       {
+                       }
+                       
+                       protected internal override System.Object CreateValue(IndexReader reader, Entry entryKey)
+                       {
+                               Entry entry = (Entry) entryKey;
+                               System.String field = entry.field;
+                               Mono.Lucene.Net.Search.DoubleParser parser = (Mono.Lucene.Net.Search.DoubleParser) entry.custom;
+                               if (parser == null)
+                               {
+                                       try
+                                       {
+                                               return wrapper.GetDoubles(reader, field, Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT_DOUBLE_PARSER);
+                                       }
+                                       catch (System.FormatException)
+                                       {
+                                               // Terms are not plain doubles; retry with the
+                                               // NumericField (trie-encoded) parser.
+                                               return wrapper.GetDoubles(reader, field, Mono.Lucene.Net.Search.FieldCache_Fields.NUMERIC_UTILS_DOUBLE_PARSER);
+                                       }
+                               }
+                               double[] retArray = null;
+                               TermDocs termDocs = reader.TermDocs();
+                               TermEnum termEnum = reader.Terms(new Term(field));
+                               try
+                               {
+                                       do 
+                                       {
+                                               Term term = termEnum.Term();
+                                               if (term == null || (System.Object) term.Field() != (System.Object) field)
+                                                       break;
+                                               double termval = parser.ParseDouble(term.Text());
+                                               if (retArray == null)
+                                                       retArray = new double[reader.MaxDoc()]; // late init: allocate on first term
+                                               termDocs.Seek(termEnum);
+                                               while (termDocs.Next())
+                                               {
+                                                       retArray[termDocs.Doc()] = termval;
+                                               }
+                                       }
+                                       while (termEnum.Next());
+                               }
+                               catch (StopFillCacheException)
+                               {
+                                       // a custom parser throws this to stop filling the cache early
+                               }
+                               finally
+                               {
+                                       termDocs.Close();
+                                       termEnum.Close();
+                               }
+                               if (retArray == null)
+                                       retArray = new double[reader.MaxDoc()]; // field had no values
+                               return retArray;
+                       }
+               }
+               
+               
+               // inherit javadocs
+               public virtual System.String[] GetStrings(IndexReader reader, System.String field)
+               {
+                       return (System.String[]) ((Cache) caches[typeof(System.String)]).Get(reader, new Entry(field, (Parser) null));
+               }
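+               // e.g. (the "title" field is hypothetical) one cached string per document:
+               //   System.String[] titles = FieldCache_Fields.DEFAULT.GetStrings(reader, "title");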
+               
+               internal sealed class StringCache:Cache
+               {
+                       internal StringCache(FieldCache wrapper):base(wrapper)
+                       {
+                       }
+                       
+                       protected internal override System.Object CreateValue(IndexReader reader, Entry entryKey)
+                       {
+                               System.String field = StringHelper.Intern((System.String) entryKey.field);
+                               System.String[] retArray = new System.String[reader.MaxDoc()];
+                               TermDocs termDocs = reader.TermDocs();
+                               TermEnum termEnum = reader.Terms(new Term(field));
+                               try
+                               {
+                                       do 
+                                       {
+                                               Term term = termEnum.Term();
+                                               if (term == null || (System.Object) term.Field() != (System.Object) field)
+                                                       break;
+                                               System.String termval = term.Text();
+                                               termDocs.Seek(termEnum);
+                                               while (termDocs.Next())
+                                               {
+                                                       retArray[termDocs.Doc()] = termval;
+                                               }
+                                       }
+                                       while (termEnum.Next());
+                               }
+                               finally
+                               {
+                                       termDocs.Close();
+                                       termEnum.Close();
+                               }
+                               return retArray;
+                       }
+               }
+               
+               
+               // inherit javadocs
+               public virtual StringIndex GetStringIndex(IndexReader reader, System.String field)
+               {
+                       return (StringIndex) ((Cache) caches[typeof(StringIndex)]).Get(reader, new Entry(field, (Parser) null));
+               }
+               
+               internal sealed class StringIndexCache:Cache
+               {
+                       internal StringIndexCache(FieldCache wrapper):base(wrapper)
+                       {
+                       }
+                       
+                       protected internal override System.Object CreateValue(IndexReader reader, Entry entryKey)
+                       {
+                               System.String field = StringHelper.Intern((System.String) entryKey.field);
+                               int[] retArray = new int[reader.MaxDoc()];
+                               System.String[] mterms = new System.String[reader.MaxDoc() + 1];
+                               TermDocs termDocs = reader.TermDocs();
+                               TermEnum termEnum = reader.Terms(new Term(field));
+                               int t = 0; // current term number
+                               
+                               // an entry for documents that have no terms in this field
+                               // should a document with no terms be at top or bottom?
+                               // this puts them at the top - if it is changed, FieldDocSortedHitQueue
+                               // needs to change as well.
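+                               // (illustrative: for docs {0:"b", 1:<no terms>, 2:"a"} this ends with
+                               // mterms = [null, "a", "b"] and retArray = [2, 0, 1])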
+                               mterms[t++] = null;
+                               
+                               try
+                               {
+                                       do 
+                                       {
+                                               Term term = termEnum.Term();
+                                               if (term == null || term.Field() != field || t >= mterms.Length)
+                                                       break;
+                                               
+                                               // store term text
+                                               mterms[t] = term.Text();
+                                               
+                                               termDocs.Seek(termEnum);
+                                               while (termDocs.Next())
+                                               {
+                                                       retArray[termDocs.Doc()] = t;
+                                               }
+                                               
+                                               t++;
+                                       }
+                                       while (termEnum.Next());
+                               }
+                               finally
+                               {
+                                       termDocs.Close();
+                                       termEnum.Close();
+                               }
+                               
+                               if (t == 0)
+                               {
+                                       // if there are no terms, make the term array
+                                       // have a single null entry
+                                       mterms = new System.String[1];
+                               }
+                               else if (t < mterms.Length)
+                               {
+                                       // if there are less terms than documents,
+                                       // trim off the dead array space
+                                       System.String[] terms = new System.String[t];
+                                       Array.Copy(mterms, 0, terms, 0, t);
+                                       mterms = terms;
+                               }
+                               
+                               StringIndex value_Renamed = new StringIndex(retArray, mterms);
+                               return value_Renamed;
+                       }
+               }
+               
+               
+               // The patterns once used to detect integer and float values in a field
+               // were removed from the original Lucene source for Java 1.3 compatibility:
+               //   protected static final Pattern pIntegers = Pattern.compile ("[0-9\\-]+");
+               //   protected static final Object pFloats = Pattern.compile ("[0-9+\\-\\.eEfFdD]+");
+               
+               // inherit javadocs
+               public virtual System.Object GetAuto(IndexReader reader, System.String field)
+               {
+                       return ((Cache) caches[typeof(System.Object)]).Get(reader, new Entry(field, (Parser) null));
+               }
+               
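+               // GetAuto guesses the cache type from the field's first term: a term like
+               // "42" parses as an int, "4.2" only as a float, and anything non-numeric
+               // falls back to a StringIndex (see AutoCache below).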
+               /// <deprecated> Please specify the exact type, instead.
+               /// Especially, guessing does <b>not</b> work with the new
+               /// {@link NumericField} type.
+               /// </deprecated>
+        [Obsolete("Please specify the exact type, instead. Especially, guessing does not work with the new NumericField type.")]
+               internal sealed class AutoCache:Cache
+               {
+                       internal AutoCache(FieldCache wrapper):base(wrapper)
+                       {
+                       }
+                       
+                       protected internal override System.Object CreateValue(IndexReader reader, Entry entryKey)
+                       {
+                               System.String field = StringHelper.Intern((System.String) entryKey.field);
+                               TermEnum enumerator = reader.Terms(new Term(field));
+                               try
+                               {
+                                       Term term = enumerator.Term();
+                                       if (term == null)
+                                       {
+                                               throw new System.SystemException("no terms in field " + field + " - cannot determine type");
+                                       }
+                                       System.Object ret = null;
+                                       if ((System.Object) term.Field() == (System.Object) field)
+                                       {
+                                               System.String termtext = term.Text().Trim();
+                                               
+                                               try
+                                               {
+                                                       System.Int32.Parse(termtext);
+                                                       ret = wrapper.GetInts(reader, field);
+                                               }
+                                               catch (System.FormatException)
+                                               {
+                                                       try
+                                                       {
+                                                               System.Int64.Parse(termtext);
+                                                               ret = wrapper.GetLongs(reader, field);
+                                                       }
+                                                       catch (System.FormatException)
+                                                       {
+                                                               try
+                                                               {
+                                                                       SupportClass.Single.Parse(termtext);
+                                                                       ret = wrapper.GetFloats(reader, field);
+                                                               }
+                                                               catch (System.FormatException)
+                                                               {
+                                                                       ret = wrapper.GetStringIndex(reader, field);
+                                                               }
+                                                       }
+                                               }
+                                       }
+                                       else
+                                       {
+                                               throw new System.SystemException("field \"" + field + "\" does not appear to be indexed");
+                                       }
+                                       return ret;
+                               }
+                               finally
+                               {
+                                       enumerator.Close();
+                               }
+                       }
+               }
+               
+               
+               /// <deprecated> 
+               /// </deprecated>
+        [Obsolete]
+               public virtual System.IComparable[] GetCustom(IndexReader reader, System.String field, SortComparator comparator)
+               {
+                       return (System.IComparable[]) ((Cache) caches[typeof(System.IComparable)]).Get(reader, new Entry(field, comparator));
+               }
+               
+               /// <deprecated> 
+               /// </deprecated>
+        [Obsolete]
+               internal sealed class CustomCache:Cache
+               {
+                       internal CustomCache(FieldCache wrapper):base(wrapper)
+                       {
+                       }
+                       
+                       protected internal override System.Object CreateValue(IndexReader reader, Entry entryKey)
+                       {
+                               Entry entry = (Entry) entryKey;
+                               System.String field = entry.field;
+                               SortComparator comparator = (SortComparator) entry.custom;
+                               System.IComparable[] retArray = new System.IComparable[reader.MaxDoc()];
+                               TermDocs termDocs = reader.TermDocs();
+                               TermEnum termEnum = reader.Terms(new Term(field));
+                               try
+                               {
+                                       do 
+                                       {
+                                               Term term = termEnum.Term();
+                                               if (term == null || (System.Object) term.Field() != (System.Object) field)
+                                                       break;
+                                               System.IComparable termval = comparator.GetComparable(term.Text());
+                                               termDocs.Seek(termEnum);
+                                               while (termDocs.Next())
+                                               {
+                                                       retArray[termDocs.Doc()] = termval;
+                                               }
+                                       }
+                                       while (termEnum.Next());
+                               }
+                               finally
+                               {
+                                       termDocs.Close();
+                                       termEnum.Close();
+                               }
+                               return retArray;
+                       }
+               }
+               
+               
+               private volatile System.IO.StreamWriter infoStream;
+               
+               public virtual void  SetInfoStream(System.IO.StreamWriter stream)
+               {
+                       infoStream = stream;
+               }
+               
+               public virtual System.IO.StreamWriter GetInfoStream()
+               {
+                       return infoStream;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldCacheRangeFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldCacheRangeFilter.cs
new file mode 100644 (file)
index 0000000..02bb333
--- /dev/null
@@ -0,0 +1,966 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NumericField = Mono.Lucene.Net.Documents.NumericField;
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using TermDocs = Mono.Lucene.Net.Index.TermDocs;
+using NumericUtils = Mono.Lucene.Net.Util.NumericUtils;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A range filter built on top of a cached single term field (in {@link FieldCache}).
+       /// 
+       /// <p/>FieldCacheRangeFilter builds a single cache for the field the first time it is used.
+       /// Each subsequent FieldCacheRangeFilter on the same field then reuses this cache,
+       /// even if the range itself changes. 
+       /// 
+       /// <p/>This means that when using {@link #newStringRange}, FieldCacheRangeFilter is much faster
+       /// (sometimes more than 100x) than building a {@link TermRangeFilter} (or a
+       /// {@link ConstantScoreRangeQuery} on a {@link TermRangeFilter}) for each query. However, if the
+       /// range never changes, it is slower (around 2x) than a CachingWrapperFilter wrapped around a
+       /// single TermRangeFilter.
+       /// 
+       /// For numeric data types, this filter may be significantly faster than {@link NumericRangeFilter}.
+       /// Furthermore, it does not need the numeric values encoded by {@link NumericField}. Its drawback
+       /// is that it only works with exactly one value per document (see below).
+       /// 
+       /// <p/>As with all {@link FieldCache} based functionality, FieldCacheRangeFilter is only valid for
+       /// fields that contain exactly one term per document (except for {@link #newStringRange},
+       /// where zero terms are also allowed). Due to a restriction of {@link FieldCache}, a value of 0 is
+       /// assumed for numeric ranges on documents that have no numeric term in the field.
+       /// 
+       /// <p/>Thus it works on dates, prices and other single-value fields, but will not work on
+       /// regular text fields. It is preferable to use a <code>NOT_ANALYZED</code> field to ensure that
+       /// there is only a single term.
+       /// 
+       /// <p/>This class has no public constructor; use one of the static factory methods, which
+       /// create the correct instance for each data type supported by {@link FieldCache}.
+       /// </summary>
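+       /// <example>A minimal usage sketch (the "price" field and the searcher are
+       /// hypothetical, for illustration only):
+       /// <code>
+       /// Filter filter = FieldCacheRangeFilter.NewStringRange("price", "010", "100", true, true);
+       /// TopDocs top = searcher.Search(new MatchAllDocsQuery(), filter, 10);
+       /// </code>
+       /// </example>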
+       
+       [Serializable]
+       public abstract class FieldCacheRangeFilter:Filter
+       {
+               [Serializable]
+               private class AnonymousClassFieldCacheRangeFilter:FieldCacheRangeFilter
+               {
+                       private class AnonymousClassFieldCacheDocIdSet:FieldCacheDocIdSet
+                       {
+                               private void  InitBlock(Mono.Lucene.Net.Search.StringIndex fcsi, int inclusiveLowerPoint, int inclusiveUpperPoint, AnonymousClassFieldCacheRangeFilter enclosingInstance)
+                               {
+                                       this.fcsi = fcsi;
+                                       this.inclusiveLowerPoint = inclusiveLowerPoint;
+                                       this.inclusiveUpperPoint = inclusiveUpperPoint;
+                                       this.enclosingInstance = enclosingInstance;
+                               }
+                               private Mono.Lucene.Net.Search.StringIndex fcsi;
+                               private int inclusiveLowerPoint;
+                               private int inclusiveUpperPoint;
+                               private AnonymousClassFieldCacheRangeFilter enclosingInstance;
+                               public AnonymousClassFieldCacheRangeFilter Enclosing_Instance
+                               {
+                                       get
+                                       {
+                                               return enclosingInstance;
+                                       }
+                                       
+                               }
+                               internal AnonymousClassFieldCacheDocIdSet(Mono.Lucene.Net.Search.StringIndex fcsi, int inclusiveLowerPoint, int inclusiveUpperPoint, AnonymousClassFieldCacheRangeFilter enclosingInstance, Mono.Lucene.Net.Index.IndexReader Param1, bool Param2):base(Param1, Param2)
+                               {
+                                       InitBlock(fcsi, inclusiveLowerPoint, inclusiveUpperPoint, enclosingInstance);
+                               }
+                               internal override bool MatchDoc(int doc)
+                               {
+                                       return fcsi.order[doc] >= inclusiveLowerPoint && fcsi.order[doc] <= inclusiveUpperPoint;
+                               }
+                       }
+                       internal AnonymousClassFieldCacheRangeFilter(System.String Param1, Mono.Lucene.Net.Search.Parser Param2, System.Object Param3, System.Object Param4, bool Param5, bool Param6):base(Param1, Param2, Param3, Param4, Param5, Param6)
+                       {
+                       }
+                       public override DocIdSet GetDocIdSet(IndexReader reader)
+                       {
+                               Mono.Lucene.Net.Search.StringIndex fcsi = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetStringIndex(reader, field);
+                               int lowerPoint = fcsi.BinarySearchLookup((System.String) lowerVal);
+                               int upperPoint = fcsi.BinarySearchLookup((System.String) upperVal);
+                               
+                               int inclusiveLowerPoint;
+                               int inclusiveUpperPoint;
+                               
+                               // Hints:
+                               // * BinarySearchLookup returns 0 if the value was null.
+                               // * the result is < 0 if no exact hit was found; the returned value
+                               //   is (-(insertion point) - 1)
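+                               // (worked example: with lookup values [null, "a", "c"], a search for
+                               // "b" returns -3, i.e. insertion point 2, so Math.Max(1, -(-3) - 1) = 2)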
+                               if (lowerPoint == 0)
+                               {
+                                       System.Diagnostics.Debug.Assert(lowerVal == null);
+                                       inclusiveLowerPoint = 1;
+                               }
+                               else if (includeLower && lowerPoint > 0)
+                               {
+                                       inclusiveLowerPoint = lowerPoint;
+                               }
+                               else if (lowerPoint > 0)
+                               {
+                                       inclusiveLowerPoint = lowerPoint + 1;
+                               }
+                               else
+                               {
+                                       inclusiveLowerPoint = System.Math.Max(1, - lowerPoint - 1);
+                               }
+                               
+                               if (upperPoint == 0)
+                               {
+                                       System.Diagnostics.Debug.Assert(upperVal == null);
+                                       inclusiveUpperPoint = System.Int32.MaxValue;
+                               }
+                               else if (includeUpper && upperPoint > 0)
+                               {
+                                       inclusiveUpperPoint = upperPoint;
+                               }
+                               else if (upperPoint > 0)
+                               {
+                                       inclusiveUpperPoint = upperPoint - 1;
+                               }
+                               else
+                               {
+                                       inclusiveUpperPoint = - upperPoint - 2;
+                               }
+                               
+                               if (inclusiveUpperPoint <= 0 || inclusiveLowerPoint > inclusiveUpperPoint)
+                                       return DocIdSet.EMPTY_DOCIDSET;
+                               
+                               System.Diagnostics.Debug.Assert(inclusiveLowerPoint > 0 && inclusiveUpperPoint > 0);
+                               
+                               // for this DocIdSet, we never need to use TermDocs,
+                               // because deleted docs have an order of 0 (null entry in StringIndex)
+                               return new AnonymousClassFieldCacheDocIdSet(fcsi, inclusiveLowerPoint, inclusiveUpperPoint, this, reader, false);
+                       }
+               }
+               [Serializable]
+               private class AnonymousClassFieldCacheRangeFilter1:FieldCacheRangeFilter
+               {
+                       private class AnonymousClassFieldCacheDocIdSet:FieldCacheDocIdSet
+                       {
+                               private void  InitBlock(sbyte[] values, byte inclusiveLowerPoint, byte inclusiveUpperPoint, AnonymousClassFieldCacheRangeFilter1 enclosingInstance)
+                               {
+                                       this.values = values;
+                                       this.inclusiveLowerPoint = inclusiveLowerPoint;
+                                       this.inclusiveUpperPoint = inclusiveUpperPoint;
+                                       this.enclosingInstance = enclosingInstance;
+                               }
+                               private sbyte[] values;
+                               private byte inclusiveLowerPoint;
+                               private byte inclusiveUpperPoint;
+                               private AnonymousClassFieldCacheRangeFilter1 enclosingInstance;
+                               public AnonymousClassFieldCacheRangeFilter1 Enclosing_Instance
+                               {
+                                       get
+                                       {
+                                               return enclosingInstance;
+                                       }
+                                       
+                               }
+                               internal AnonymousClassFieldCacheDocIdSet(sbyte[] values, byte inclusiveLowerPoint, byte inclusiveUpperPoint, AnonymousClassFieldCacheRangeFilter1 enclosingInstance, Mono.Lucene.Net.Index.IndexReader Param1, bool Param2):base(Param1, Param2)
+                               {
+                                       InitBlock(values, inclusiveLowerPoint, inclusiveUpperPoint, enclosingInstance);
+                               }
+                               internal override bool MatchDoc(int doc)
+                               {
+                                       return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
+                               }
+                       }
+                       internal AnonymousClassFieldCacheRangeFilter1(System.String Param1, Mono.Lucene.Net.Search.Parser Param2, System.Object Param3, System.Object Param4, bool Param5, bool Param6):base(Param1, Param2, Param3, Param4, Param5, Param6)
+                       {
+                       }
+                       public override DocIdSet GetDocIdSet(IndexReader reader)
+                       {
+                               byte inclusiveLowerPoint;
+                               byte inclusiveUpperPoint;
+                               if (lowerVal != null)
+                               {
+                                       byte i = (byte) System.Convert.ToSByte(((System.ValueType) lowerVal));
+                                       if (!includeLower && i == (byte) System.Byte.MaxValue)
+                                               return DocIdSet.EMPTY_DOCIDSET;
+                                       inclusiveLowerPoint = (byte) (includeLower?i:(i + 1));
+                               }
+                               else
+                               {
+                                       inclusiveLowerPoint = (byte) System.Byte.MinValue;
+                               }
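+                               // e.g. an exclusive lowerVal of 5 becomes an inclusive bound of 6;
+                               // the MaxValue check above keeps the +1 from wrapping around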
+                               if (upperVal != null)
+                               {
+                                       byte i = (byte) System.Convert.ToSByte(((System.ValueType) upperVal));
+                                       if (!includeUpper && i == (byte) System.Byte.MinValue)
+                                               return DocIdSet.EMPTY_DOCIDSET;
+                                       inclusiveUpperPoint = (byte) (includeUpper?i:(i - 1));
+                               }
+                               else
+                               {
+                                       inclusiveUpperPoint = (byte) System.Byte.MaxValue;
+                               }
+                               
+                               if (inclusiveLowerPoint > inclusiveUpperPoint)
+                                       return DocIdSet.EMPTY_DOCIDSET;
+                               
+                               sbyte[] values = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetBytes(reader, field, (Mono.Lucene.Net.Search.ByteParser) parser);
+                               // only ask for TermDocs if the range contains 0: docs without a value
+                               // default to 0 in the FieldCache array and must not match spuriously
+                               return new AnonymousClassFieldCacheDocIdSet(values, inclusiveLowerPoint, inclusiveUpperPoint, this, reader, (inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0));
+                       }
+               }
+               [Serializable]
+               private class AnonymousClassFieldCacheRangeFilter2:FieldCacheRangeFilter
+               {
+                       private class AnonymousClassFieldCacheDocIdSet:FieldCacheDocIdSet
+                       {
+                               private void  InitBlock(short[] values, short inclusiveLowerPoint, short inclusiveUpperPoint, AnonymousClassFieldCacheRangeFilter2 enclosingInstance)
+                               {
+                                       this.values = values;
+                                       this.inclusiveLowerPoint = inclusiveLowerPoint;
+                                       this.inclusiveUpperPoint = inclusiveUpperPoint;
+                                       this.enclosingInstance = enclosingInstance;
+                               }
+                               private short[] values;
+                               private short inclusiveLowerPoint;
+                               private short inclusiveUpperPoint;
+                               private AnonymousClassFieldCacheRangeFilter2 enclosingInstance;
+                               public AnonymousClassFieldCacheRangeFilter2 Enclosing_Instance
+                               {
+                                       get
+                                       {
+                                               return enclosingInstance;
+                                       }
+                                       
+                               }
+                               internal AnonymousClassFieldCacheDocIdSet(short[] values, short inclusiveLowerPoint, short inclusiveUpperPoint, AnonymousClassFieldCacheRangeFilter2 enclosingInstance, Mono.Lucene.Net.Index.IndexReader Param1, bool Param2):base(Param1, Param2)
+                               {
+                                       InitBlock(values, inclusiveLowerPoint, inclusiveUpperPoint, enclosingInstance);
+                               }
+                               internal override bool MatchDoc(int doc)
+                               {
+                                       return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
+                               }
+                       }
+                       internal AnonymousClassFieldCacheRangeFilter2(System.String Param1, Mono.Lucene.Net.Search.Parser Param2, System.Object Param3, System.Object Param4, bool Param5, bool Param6):base(Param1, Param2, Param3, Param4, Param5, Param6)
+                       {
+                       }
+                       public override DocIdSet GetDocIdSet(IndexReader reader)
+                       {
+                               short inclusiveLowerPoint;
+                               short inclusiveUpperPoint;
+                               if (lowerVal != null)
+                               {
+                                       short i = System.Convert.ToInt16(((System.ValueType) lowerVal));
+                                       if (!includeLower && i == System.Int16.MaxValue)
+                                               return DocIdSet.EMPTY_DOCIDSET;
+                                       inclusiveLowerPoint = (short) (includeLower?i:(i + 1));
+                               }
+                               else
+                               {
+                                       inclusiveLowerPoint = System.Int16.MinValue;
+                               }
+                               if (upperVal != null)
+                               {
+                                       short i = System.Convert.ToInt16(((System.ValueType) upperVal));
+                                       if (!includeUpper && i == System.Int16.MinValue)
+                                               return DocIdSet.EMPTY_DOCIDSET;
+                                       inclusiveUpperPoint = (short) (includeUpper?i:(i - 1));
+                               }
+                               else
+                               {
+                                       inclusiveUpperPoint = System.Int16.MaxValue;
+                               }
+                               
+                               if (inclusiveLowerPoint > inclusiveUpperPoint)
+                                       return DocIdSet.EMPTY_DOCIDSET;
+                               
+                               short[] values = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetShorts(reader, field, (Mono.Lucene.Net.Search.ShortParser) parser);
+                               // only ask for TermDocs if the range contains 0: docs without a value
+                               // default to 0 in the FieldCache array and must not match spuriously
+                               return new AnonymousClassFieldCacheDocIdSet(values, inclusiveLowerPoint, inclusiveUpperPoint, this, reader, (inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0));
+                       }
+               }
+               [Serializable]
+               private class AnonymousClassFieldCacheRangeFilter3:FieldCacheRangeFilter
+               {
+                       private class AnonymousClassFieldCacheDocIdSet:FieldCacheDocIdSet
+                       {
+                               private void  InitBlock(int[] values, int inclusiveLowerPoint, int inclusiveUpperPoint, AnonymousClassFieldCacheRangeFilter3 enclosingInstance)
+                               {
+                                       this.values = values;
+                                       this.inclusiveLowerPoint = inclusiveLowerPoint;
+                                       this.inclusiveUpperPoint = inclusiveUpperPoint;
+                                       this.enclosingInstance = enclosingInstance;
+                               }
+                               private int[] values;
+                               private int inclusiveLowerPoint;
+                               private int inclusiveUpperPoint;
+                               private AnonymousClassFieldCacheRangeFilter3 enclosingInstance;
+                               public AnonymousClassFieldCacheRangeFilter3 Enclosing_Instance
+                               {
+                                       get
+                                       {
+                                               return enclosingInstance;
+                                       }
+                                       
+                               }
+                               internal AnonymousClassFieldCacheDocIdSet(int[] values, int inclusiveLowerPoint, int inclusiveUpperPoint, AnonymousClassFieldCacheRangeFilter3 enclosingInstance, Mono.Lucene.Net.Index.IndexReader Param1, bool Param2):base(Param1, Param2)
+                               {
+                                       InitBlock(values, inclusiveLowerPoint, inclusiveUpperPoint, enclosingInstance);
+                               }
+                               internal override bool MatchDoc(int doc)
+                               {
+                                       return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
+                               }
+                       }
+                       internal AnonymousClassFieldCacheRangeFilter3(System.String Param1, Mono.Lucene.Net.Search.Parser Param2, System.Object Param3, System.Object Param4, bool Param5, bool Param6):base(Param1, Param2, Param3, Param4, Param5, Param6)
+                       {
+                       }
+                       public override DocIdSet GetDocIdSet(IndexReader reader)
+                       {
+                               int inclusiveLowerPoint;
+                               int inclusiveUpperPoint;
+                               if (lowerVal != null)
+                               {
+                                       int i = System.Convert.ToInt32(((System.ValueType) lowerVal));
+                                       if (!includeLower && i == System.Int32.MaxValue)
+                                               return DocIdSet.EMPTY_DOCIDSET;
+                                       inclusiveLowerPoint = includeLower?i:(i + 1);
+                               }
+                               else
+                               {
+                                       inclusiveLowerPoint = System.Int32.MinValue;
+                               }
+                               if (upperVal != null)
+                               {
+                                       int i = System.Convert.ToInt32(((System.ValueType) upperVal));
+                                       if (!includeUpper && i == System.Int32.MinValue)
+                                               return DocIdSet.EMPTY_DOCIDSET;
+                                       inclusiveUpperPoint = includeUpper?i:(i - 1);
+                               }
+                               else
+                               {
+                                       inclusiveUpperPoint = System.Int32.MaxValue;
+                               }
+                               
+                               if (inclusiveLowerPoint > inclusiveUpperPoint)
+                                       return DocIdSet.EMPTY_DOCIDSET;
+                               
+                               int[] values = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetInts(reader, field, (Mono.Lucene.Net.Search.IntParser) parser);
+                               // only ask for TermDocs if the range contains 0: docs without a value
+                               // default to 0 in the FieldCache array and must not match spuriously
+                               return new AnonymousClassFieldCacheDocIdSet(values, inclusiveLowerPoint, inclusiveUpperPoint, this, reader, (inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0));
+                       }
+               }
+               [Serializable]
+               private class AnonymousClassFieldCacheRangeFilter4:FieldCacheRangeFilter
+               {
+                       private class AnonymousClassFieldCacheDocIdSet:FieldCacheDocIdSet
+                       {
+                               private void  InitBlock(long[] values, long inclusiveLowerPoint, long inclusiveUpperPoint, AnonymousClassFieldCacheRangeFilter4 enclosingInstance)
+                               {
+                                       this.values = values;
+                                       this.inclusiveLowerPoint = inclusiveLowerPoint;
+                                       this.inclusiveUpperPoint = inclusiveUpperPoint;
+                                       this.enclosingInstance = enclosingInstance;
+                               }
+                               private long[] values;
+                               private long inclusiveLowerPoint;
+                               private long inclusiveUpperPoint;
+                               private AnonymousClassFieldCacheRangeFilter4 enclosingInstance;
+                               public AnonymousClassFieldCacheRangeFilter4 Enclosing_Instance
+                               {
+                                       get
+                                       {
+                                               return enclosingInstance;
+                                       }
+                                       
+                               }
+                               internal AnonymousClassFieldCacheDocIdSet(long[] values, long inclusiveLowerPoint, long inclusiveUpperPoint, AnonymousClassFieldCacheRangeFilter4 enclosingInstance, Mono.Lucene.Net.Index.IndexReader Param1, bool Param2):base(Param1, Param2)
+                               {
+                                       InitBlock(values, inclusiveLowerPoint, inclusiveUpperPoint, enclosingInstance);
+                               }
+                               internal override bool MatchDoc(int doc)
+                               {
+                                       return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
+                               }
+                       }
+                       internal AnonymousClassFieldCacheRangeFilter4(System.String Param1, Mono.Lucene.Net.Search.Parser Param2, System.Object Param3, System.Object Param4, bool Param5, bool Param6):base(Param1, Param2, Param3, Param4, Param5, Param6)
+                       {
+                       }
+                       public override DocIdSet GetDocIdSet(IndexReader reader)
+                       {
+                               long inclusiveLowerPoint;
+                               long inclusiveUpperPoint;
+                               if (lowerVal != null)
+                               {
+                                       long i = System.Convert.ToInt64(((System.ValueType) lowerVal));
+                                       if (!includeLower && i == System.Int64.MaxValue)
+                                               return DocIdSet.EMPTY_DOCIDSET;
+                                       inclusiveLowerPoint = includeLower?i:(i + 1L);
+                               }
+                               else
+                               {
+                                       inclusiveLowerPoint = System.Int64.MinValue;
+                               }
+                               if (upperVal != null)
+                               {
+                                       long i = System.Convert.ToInt64(((System.ValueType) upperVal));
+                                       if (!includeUpper && i == System.Int64.MinValue)
+                                               return DocIdSet.EMPTY_DOCIDSET;
+                                       inclusiveUpperPoint = includeUpper?i:(i - 1L);
+                               }
+                               else
+                               {
+                                       inclusiveUpperPoint = System.Int64.MaxValue;
+                               }
+                               
+                               if (inclusiveLowerPoint > inclusiveUpperPoint)
+                                       return DocIdSet.EMPTY_DOCIDSET;
+                               
+                               long[] values = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetLongs(reader, field, (Mono.Lucene.Net.Search.LongParser) parser);
+                               // only ask for TermDocs if the range contains 0: docs without a value
+                               // default to 0 in the FieldCache array and must not match spuriously
+                               return new AnonymousClassFieldCacheDocIdSet(values, inclusiveLowerPoint, inclusiveUpperPoint, this, reader, (inclusiveLowerPoint <= 0L && inclusiveUpperPoint >= 0L));
+                       }
+               }
+               [Serializable]
+               private class AnonymousClassFieldCacheRangeFilter5:FieldCacheRangeFilter
+               {
+                       private class AnonymousClassFieldCacheDocIdSet:FieldCacheDocIdSet
+                       {
+                               private void  InitBlock(float[] values, float inclusiveLowerPoint, float inclusiveUpperPoint, AnonymousClassFieldCacheRangeFilter5 enclosingInstance)
+                               {
+                                       this.values = values;
+                                       this.inclusiveLowerPoint = inclusiveLowerPoint;
+                                       this.inclusiveUpperPoint = inclusiveUpperPoint;
+                                       this.enclosingInstance = enclosingInstance;
+                               }
+                               private float[] values;
+                               private float inclusiveLowerPoint;
+                               private float inclusiveUpperPoint;
+                               private AnonymousClassFieldCacheRangeFilter5 enclosingInstance;
+                               public AnonymousClassFieldCacheRangeFilter5 Enclosing_Instance
+                               {
+                                       get
+                                       {
+                                               return enclosingInstance;
+                                       }
+                                       
+                               }
+                               internal AnonymousClassFieldCacheDocIdSet(float[] values, float inclusiveLowerPoint, float inclusiveUpperPoint, AnonymousClassFieldCacheRangeFilter5 enclosingInstance, Mono.Lucene.Net.Index.IndexReader Param1, bool Param2):base(Param1, Param2)
+                               {
+                                       InitBlock(values, inclusiveLowerPoint, inclusiveUpperPoint, enclosingInstance);
+                               }
+                               internal override bool MatchDoc(int doc)
+                               {
+                                       return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
+                               }
+                       }
+                       internal AnonymousClassFieldCacheRangeFilter5(System.String Param1, Mono.Lucene.Net.Search.Parser Param2, System.Object Param3, System.Object Param4, bool Param5, bool Param6):base(Param1, Param2, Param3, Param4, Param5, Param6)
+                       {
+                       }
+                       public override DocIdSet GetDocIdSet(IndexReader reader)
+                       {
+                               // we transform the floating point numbers to sortable integers
+                               // using NumericUtils so the next bigger/lower value is easy to find
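+                               // (e.g. the next float above f in sort order is
+                               // SortableIntToFloat(FloatToSortableInt(f) + 1))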
+                               float inclusiveLowerPoint;
+                               float inclusiveUpperPoint;
+                               if (lowerVal != null)
+                               {
+                                       float f = System.Convert.ToSingle(((System.ValueType) lowerVal));
+                                       // an exclusive lower bound of positive infinity matches nothing
+                                       if (!includeLower && f > 0.0f && System.Single.IsInfinity(f))
+                                               return DocIdSet.EMPTY_DOCIDSET;
+                                       int i = NumericUtils.FloatToSortableInt(f);
+                                       inclusiveLowerPoint = NumericUtils.SortableIntToFloat(includeLower?i:(i + 1));
+                               }
+                               else
+                               {
+                                       inclusiveLowerPoint = System.Single.NegativeInfinity;
+                               }
+                               if (upperVal != null)
+                               {
+                                       float f = System.Convert.ToSingle(((System.ValueType) upperVal));
+                                       if (!includeUpper && f < 0.0f && System.Single.IsInfinity(f))
+                                               return DocIdSet.EMPTY_DOCIDSET;
+                                       int i = NumericUtils.FloatToSortableInt(f);
+                                       inclusiveUpperPoint = NumericUtils.SortableIntToFloat(includeUpper?i:(i - 1));
+                               }
+                               else
+                               {
+                                       inclusiveUpperPoint = System.Single.PositiveInfinity;
+                               }
+                               
+                               if (inclusiveLowerPoint > inclusiveUpperPoint)
+                                       return DocIdSet.EMPTY_DOCIDSET;
+                               
+                               float[] values = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetFloats(reader, field, (Mono.Lucene.Net.Search.FloatParser) parser);
+                               // only ask for TermDocs if the range contains 0: docs without a value
+                               // default to 0 in the FieldCache array and must not match spuriously
+                               return new AnonymousClassFieldCacheDocIdSet(values, inclusiveLowerPoint, inclusiveUpperPoint, this, reader, (inclusiveLowerPoint <= 0.0f && inclusiveUpperPoint >= 0.0f));
+                       }
+               }
+               [Serializable]
+               private class AnonymousClassFieldCacheRangeFilter6:FieldCacheRangeFilter
+               {
+                       private class AnonymousClassFieldCacheDocIdSet:FieldCacheDocIdSet
+                       {
+                               private void  InitBlock(double[] values, double inclusiveLowerPoint, double inclusiveUpperPoint, AnonymousClassFieldCacheRangeFilter6 enclosingInstance)
+                               {
+                                       this.values = values;
+                                       this.inclusiveLowerPoint = inclusiveLowerPoint;
+                                       this.inclusiveUpperPoint = inclusiveUpperPoint;
+                                       this.enclosingInstance = enclosingInstance;
+                               }
+                               private double[] values;
+                               private double inclusiveLowerPoint;
+                               private double inclusiveUpperPoint;
+                               private AnonymousClassFieldCacheRangeFilter6 enclosingInstance;
+                               public AnonymousClassFieldCacheRangeFilter6 Enclosing_Instance
+                               {
+                                       get
+                                       {
+                                               return enclosingInstance;
+                                       }
+                                       
+                               }
+                               internal AnonymousClassFieldCacheDocIdSet(double[] values, double inclusiveLowerPoint, double inclusiveUpperPoint, AnonymousClassFieldCacheRangeFilter6 enclosingInstance, Mono.Lucene.Net.Index.IndexReader Param1, bool Param2):base(Param1, Param2)
+                               {
+                                       InitBlock(values, inclusiveLowerPoint, inclusiveUpperPoint, enclosingInstance);
+                               }
+                               internal override bool MatchDoc(int doc)
+                               {
+                                       return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
+                               }
+                       }
+                       internal AnonymousClassFieldCacheRangeFilter6(System.String Param1, Mono.Lucene.Net.Search.Parser Param2, System.Object Param3, System.Object Param4, bool Param5, bool Param6):base(Param1, Param2, Param3, Param4, Param5, Param6)
+                       {
+                       }
+                       public override DocIdSet GetDocIdSet(IndexReader reader)
+                       {
+                               // we transform the floating point numbers to sortable longs
+                               // using NumericUtils so the next bigger/lower value is easy to find
+                               double inclusiveLowerPoint;
+                               double inclusiveUpperPoint;
+                               if (lowerVal != null)
+                               {
+                                       double f = System.Convert.ToDouble(((System.ValueType) lowerVal));
+                                       // an exclusive lower bound of positive infinity matches nothing
+                                       if (!includeLower && f > 0.0 && System.Double.IsInfinity(f))
+                                               return DocIdSet.EMPTY_DOCIDSET;
+                                       long i = NumericUtils.DoubleToSortableLong(f);
+                                       inclusiveLowerPoint = NumericUtils.SortableLongToDouble(includeLower?i:(i + 1L));
+                               }
+                               else
+                               {
+                                       inclusiveLowerPoint = System.Double.NegativeInfinity;
+                               }
+                               if (upperVal != null)
+                               {
+                                       double f = System.Convert.ToDouble(((System.ValueType) upperVal));
+                                       if (!includeUpper && f < 0.0 && System.Double.IsInfinity(f))
+                                               return DocIdSet.EMPTY_DOCIDSET;
+                                       long i = NumericUtils.DoubleToSortableLong(f);
+                                       inclusiveUpperPoint = NumericUtils.SortableLongToDouble(includeUpper?i:(i - 1L));
+                               }
+                               else
+                               {
+                                       inclusiveUpperPoint = System.Double.PositiveInfinity;
+                               }
+                               
+                               if (inclusiveLowerPoint > inclusiveUpperPoint)
+                                       return DocIdSet.EMPTY_DOCIDSET;
+                               
+                               double[] values = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetDoubles(reader, field, (Mono.Lucene.Net.Search.DoubleParser) parser);
+                               // only ask for TermDocs if the range contains 0: docs without a value
+                               // default to 0 in the FieldCache array and must not match spuriously
+                               return new AnonymousClassFieldCacheDocIdSet(values, inclusiveLowerPoint, inclusiveUpperPoint, this, reader, (inclusiveLowerPoint <= 0.0 && inclusiveUpperPoint >= 0.0));
+                       }
+               }
+               internal System.String field;
+               internal Mono.Lucene.Net.Search.Parser parser;
+               internal System.Object lowerVal;
+               internal System.Object upperVal;
+               internal bool includeLower;
+               internal bool includeUpper;
+               
+               private FieldCacheRangeFilter(System.String field, Mono.Lucene.Net.Search.Parser parser, System.Object lowerVal, System.Object upperVal, bool includeLower, bool includeUpper)
+               {
+                       this.field = field;
+                       this.parser = parser;
+                       this.lowerVal = lowerVal;
+                       this.upperVal = upperVal;
+                       this.includeLower = includeLower;
+                       this.includeUpper = includeUpper;
+               }
+               
+               /// <summary>This method is implemented for each data type </summary>
+               public abstract override DocIdSet GetDocIdSet(IndexReader reader);
+               
+               /// <summary> Creates a string range filter using {@link FieldCache#getStringIndex}. This works with all
+               /// fields containing zero or one term in the field. The range can be half-open by setting one
+               /// of the values to <code>null</code>.
+               /// </summary>
+               public static FieldCacheRangeFilter NewStringRange(System.String field, System.String lowerVal, System.String upperVal, bool includeLower, bool includeUpper)
+               {
+                       return new AnonymousClassFieldCacheRangeFilter(field, null, lowerVal, upperVal, includeLower, includeUpper);
+               }
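+               // Example (illustrative; the field name and bounds are hypothetical):
+               // match docs whose single "author" term is >= "a" and < "c"; passing
+               // null for either bound leaves that end of the range open.
+               //
+               //   Filter f = FieldCacheRangeFilter.NewStringRange("author", "a", "c", true, false);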
+               
+               /// <summary> Creates a numeric range filter using {@link FieldCache#GetBytes(IndexReader,String)}. This works with all
+               /// byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
+               /// of the values to <code>null</code>.
+               /// </summary>
+               public static FieldCacheRangeFilter NewByteRange(System.String field, System.Byte lowerVal, System.Byte upperVal, bool includeLower, bool includeUpper)
+               {
+                       return NewByteRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
+               }
+               
+               /// <summary> Creates a numeric range filter using {@link FieldCache#GetBytes(IndexReader,String,FieldCache.ByteParser)}. This works with all
+               /// byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
+               /// of the values to <code>null</code>.
+               /// </summary>
+               public static FieldCacheRangeFilter NewByteRange(System.String field, Mono.Lucene.Net.Search.ByteParser parser, System.Byte lowerVal, System.Byte upperVal, bool includeLower, bool includeUpper)
+               {
+                       return new AnonymousClassFieldCacheRangeFilter1(field, parser, lowerVal, upperVal, includeLower, includeUpper);
+               }
+               
+               /// <summary> Creates a numeric range filter using {@link FieldCache#GetShorts(IndexReader,String)}. This works with all
+               /// short fields containing exactly one numeric term in the field. The range can be half-open by setting one
+               /// of the values to <code>null</code>.
+               /// </summary>
+               public static FieldCacheRangeFilter NewShortRange(System.String field, System.ValueType lowerVal, System.ValueType upperVal, bool includeLower, bool includeUpper)
+               {
+                       return NewShortRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
+               }
+               
+               /// <summary> Creates a numeric range filter using {@link FieldCache#GetShorts(IndexReader,String,FieldCache.ShortParser)}. This works with all
+               /// short fields containing exactly one numeric term in the field. The range can be half-open by setting one
+               /// of the values to <code>null</code>.
+               /// </summary>
+               public static FieldCacheRangeFilter NewShortRange(System.String field, Mono.Lucene.Net.Search.ShortParser parser, System.ValueType lowerVal, System.ValueType upperVal, bool includeLower, bool includeUpper)
+               {
+                       return new AnonymousClassFieldCacheRangeFilter2(field, parser, lowerVal, upperVal, includeLower, includeUpper);
+               }
+               
+               /// <summary> Creates a numeric range filter using {@link FieldCache#GetInts(IndexReader,String)}. This works with all
+               /// int fields containing exactly one numeric term in the field. The range can be half-open by setting one
+               /// of the values to <code>null</code>.
+               /// </summary>
+               public static FieldCacheRangeFilter NewIntRange(System.String field, System.ValueType lowerVal, System.ValueType upperVal, bool includeLower, bool includeUpper)
+               {
+                       return NewIntRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
+               }
+               
+               /// <summary> Creates a numeric range filter using {@link FieldCache#GetInts(IndexReader,String,FieldCache.IntParser)}. This works with all
+               /// int fields containing exactly one numeric term in the field. The range can be half-open by setting one
+               /// of the values to <code>null</code>.
+               /// </summary>
+               public static FieldCacheRangeFilter NewIntRange(System.String field, Mono.Lucene.Net.Search.IntParser parser, System.ValueType lowerVal, System.ValueType upperVal, bool includeLower, bool includeUpper)
+               {
+                       return new AnonymousClassFieldCacheRangeFilter3(field, parser, lowerVal, upperVal, includeLower, includeUpper);
+               }
+               
+               /// <summary> Creates a numeric range filter using {@link FieldCache#GetLongs(IndexReader,String)}. This works with all
+               /// long fields containing exactly one numeric term in the field. The range can be half-open by setting one
+               /// of the values to <code>null</code>.
+               /// </summary>
+               public static FieldCacheRangeFilter NewLongRange(System.String field, System.ValueType lowerVal, System.ValueType upperVal, bool includeLower, bool includeUpper)
+               {
+                       return NewLongRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
+               }
+               
+               /// <summary> Creates a numeric range filter using {@link FieldCache#GetLongs(IndexReader,String,FieldCache.LongParser)}. This works with all
+               /// long fields containing exactly one numeric term in the field. The range can be half-open by setting one
+               /// of the values to <code>null</code>.
+               /// </summary>
+               public static FieldCacheRangeFilter NewLongRange(System.String field, Mono.Lucene.Net.Search.LongParser parser, System.ValueType lowerVal, System.ValueType upperVal, bool includeLower, bool includeUpper)
+               {
+                       return new AnonymousClassFieldCacheRangeFilter4(field, parser, lowerVal, upperVal, includeLower, includeUpper);
+               }
+               
+               /// <summary> Creates a numeric range filter using {@link FieldCache#GetFloats(IndexReader,String)}. This works with all
+               /// float fields containing exactly one numeric term in the field. The range can be half-open by setting one
+               /// of the values to <code>null</code>.
+               /// </summary>
+               public static FieldCacheRangeFilter NewFloatRange(System.String field, System.ValueType lowerVal, System.ValueType upperVal, bool includeLower, bool includeUpper)
+               {
+                       return NewFloatRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
+               }
+               
+               /// <summary> Creates a numeric range filter using {@link FieldCache#GetFloats(IndexReader,String,FieldCache.FloatParser)}. This works with all
+               /// float fields containing exactly one numeric term in the field. The range can be half-open by setting one
+               /// of the values to <code>null</code>.
+               /// </summary>
+               public static FieldCacheRangeFilter NewFloatRange(System.String field, Mono.Lucene.Net.Search.FloatParser parser, System.ValueType lowerVal, System.ValueType upperVal, bool includeLower, bool includeUpper)
+               {
+                       return new AnonymousClassFieldCacheRangeFilter5(field, parser, lowerVal, upperVal, includeLower, includeUpper);
+               }
+               
+               /// <summary> Creates a numeric range filter using {@link FieldCache#GetDoubles(IndexReader,String)}. This works with all
+               /// double fields containing exactly one numeric term in the field. The range can be half-open by setting one
+               /// of the values to <code>null</code>.
+               /// </summary>
+               public static FieldCacheRangeFilter NewDoubleRange(System.String field, System.ValueType lowerVal, System.ValueType upperVal, bool includeLower, bool includeUpper)
+               {
+                       return NewDoubleRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
+               }
+               
+               /// <summary> Creates a numeric range filter using {@link FieldCache#GetDoubles(IndexReader,String,FieldCache.DoubleParser)}. This works with all
+               /// double fields containing exactly one numeric term in the field. The range can be half-open by setting one
+               /// of the values to <code>null</code>.
+               /// </summary>
+               public static FieldCacheRangeFilter NewDoubleRange(System.String field, Mono.Lucene.Net.Search.DoubleParser parser, System.ValueType lowerVal, System.ValueType upperVal, bool includeLower, bool includeUpper)
+               {
+                       return new AnonymousClassFieldCacheRangeFilter6(field, parser, lowerVal, upperVal, includeLower, includeUpper);
+               }
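+               // Example (illustrative; the field name and bounds are hypothetical):
+               // the numeric factories take System.ValueType, so plain numeric
+               // literals box implicitly; null again leaves an end of the range open.
+               //
+               //   Filter f = FieldCacheRangeFilter.NewDoubleRange("price", 9.99, 100.0, true, false);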
+               
+               public override System.String ToString()
+               {
+                       System.Text.StringBuilder sb = new System.Text.StringBuilder(field).Append(":");
+                       return sb.Append(includeLower?'[':'{').Append((lowerVal == null)?"*":lowerVal.ToString()).Append(" TO ").Append((upperVal == null)?"*":upperVal.ToString()).Append(includeUpper?']':'}').ToString();
+               }
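+               // e.g. a filter over "price" from 9.99 (inclusive) to 100.0 (exclusive)
+               // renders as "price:[9.99 TO 100}"; an open (null) end renders as '*'.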
+               
+               public  override bool Equals(System.Object o)
+               {
+                       if (this == o)
+                               return true;
+                       if (!(o is FieldCacheRangeFilter))
+                               return false;
+                       FieldCacheRangeFilter other = (FieldCacheRangeFilter) o;
+                       
+                       if (!this.field.Equals(other.field) || this.includeLower != other.includeLower || this.includeUpper != other.includeUpper)
+                       {
+                               return false;
+                       }
+                       if (this.lowerVal != null?!this.lowerVal.Equals(other.lowerVal):other.lowerVal != null)
+                               return false;
+                       if (this.upperVal != null?!this.upperVal.Equals(other.upperVal):other.upperVal != null)
+                               return false;
+                       if (this.parser != null?!this.parser.Equals(other.parser):other.parser != null)
+                               return false;
+                       return true;
+               }
+               
+               public override int GetHashCode()
+               {
+                       int h = field.GetHashCode();
+                       h ^= ((lowerVal != null)?lowerVal.GetHashCode():550356204);
+                       h = (h << 1) | (SupportClass.Number.URShift(h, 31)); // rotate to distinguish lower from upper
+                       h ^= ((upperVal != null)?upperVal.GetHashCode():- 1674416163);
+                       h ^= ((parser != null)?parser.GetHashCode():- 1572457324);
+                       h ^= (includeLower?1549299360:- 365038026) ^ (includeUpper?1721088258:1948649653);
+                       return h;
+               }
+               
+               internal abstract class FieldCacheDocIdSet:DocIdSet
+               {
+                       private class AnonymousClassDocIdSetIterator:DocIdSetIterator
+                       {
+                               public AnonymousClassDocIdSetIterator(Mono.Lucene.Net.Index.TermDocs termDocs, FieldCacheDocIdSet enclosingInstance)
+                               {
+                                       InitBlock(termDocs, enclosingInstance);
+                               }
+                               private void  InitBlock(Mono.Lucene.Net.Index.TermDocs termDocs, FieldCacheDocIdSet enclosingInstance)
+                               {
+                                       this.termDocs = termDocs;
+                                       this.enclosingInstance = enclosingInstance;
+                               }
+                               private Mono.Lucene.Net.Index.TermDocs termDocs;
+                               private FieldCacheDocIdSet enclosingInstance;
+                               public FieldCacheDocIdSet Enclosing_Instance
+                               {
+                                       get
+                                       {
+                                               return enclosingInstance;
+                                       }
+                                       
+                               }
+                               private int doc = - 1;
+                               
+                               /// <deprecated> use {@link #NextDoc()} instead. 
+                               /// </deprecated>
+                [Obsolete("Mono.Lucene.Net-2.9.1. This method overrides obsolete member Mono.Lucene.Net.Search.DocIdSetIterator.Next()")]
+                               public override bool Next()
+                               {
+                                       return NextDoc() != NO_MORE_DOCS;
+                               }
+                               
+                               /// <deprecated> use {@link #Advance(int)} instead. 
+                               /// </deprecated>
+                [Obsolete("use Advance(int) instead.")]
+                               public override bool SkipTo(int target)
+                               {
+                                       return Advance(target) != NO_MORE_DOCS;
+                               }
+                               
+                               /// <deprecated> use {@link #DocID()} instead. 
+                               /// </deprecated>
+                [Obsolete("use DocID() instead.")]
+                               public override int Doc()
+                               {
+                                       return termDocs.Doc();
+                               }
+                               
+                               public override int DocID()
+                               {
+                                       return doc;
+                               }
+                               
+                               public override int NextDoc()
+                               {
+                                       do 
+                                       {
+                                               if (!termDocs.Next())
+                                                       return doc = NO_MORE_DOCS;
+                                       }
+                                       while (!Enclosing_Instance.MatchDoc(doc = termDocs.Doc()));
+                                       return doc;
+                               }
+                               
+                               public override int Advance(int target)
+                               {
+                                       if (!termDocs.SkipTo(target))
+                                               return doc = NO_MORE_DOCS;
+                                       while (!Enclosing_Instance.MatchDoc(doc = termDocs.Doc()))
+                                       {
+                                               if (!termDocs.Next())
+                                                       return doc = NO_MORE_DOCS;
+                                       }
+                                       return doc;
+                               }
+                       }
+                       private class AnonymousClassDocIdSetIterator1:DocIdSetIterator
+                       {
+                               public AnonymousClassDocIdSetIterator1(FieldCacheDocIdSet enclosingInstance)
+                               {
+                                       InitBlock(enclosingInstance);
+                               }
+                               private void  InitBlock(FieldCacheDocIdSet enclosingInstance)
+                               {
+                                       this.enclosingInstance = enclosingInstance;
+                               }
+                               private FieldCacheDocIdSet enclosingInstance;
+                               public FieldCacheDocIdSet Enclosing_Instance
+                               {
+                                       get
+                                       {
+                                               return enclosingInstance;
+                                       }
+                                       
+                               }
+                               private int doc = - 1;
+                               
+                               /// <deprecated> use {@link #NextDoc()} instead. 
+                               /// </deprecated>
+                [Obsolete("use NextDoc() instead.")]
+                               public override bool Next()
+                               {
+                                       return NextDoc() != NO_MORE_DOCS;
+                               }
+                               
+                               /// <deprecated> use {@link #Advance(int)} instead. 
+                               /// </deprecated>
+                [Obsolete("use Advance(int) instead.")]
+                               public override bool SkipTo(int target)
+                               {
+                                       return Advance(target) != NO_MORE_DOCS;
+                               }
+                               
+                               /// <deprecated> use {@link #DocID()} instead. 
+                               /// </deprecated>
+                [Obsolete("use DocID() instead.")]
+                               public override int Doc()
+                               {
+                                       return doc;
+                               }
+                               
+                               public override int DocID()
+                               {
+                                       return doc;
+                               }
+                               
+                               public override int NextDoc()
+                               {
+                                       try
+                                       {
+                                               do 
+                                               {
+                                                       doc++;
+                                               }
+                                               while (!Enclosing_Instance.MatchDoc(doc));
+                                               return doc;
+                                       }
+                                       catch (System.IndexOutOfRangeException)
+                                       {
+                                               return doc = NO_MORE_DOCS;
+                                       }
+                               }
+                               
+                               public override int Advance(int target)
+                               {
+                                       try
+                                       {
+                                               doc = target;
+                                               while (!Enclosing_Instance.MatchDoc(doc))
+                                               {
+                                                       doc++;
+                                               }
+                                               return doc;
+                                       }
+                                       catch (System.IndexOutOfRangeException)
+                                       {
+                                               return doc = NO_MORE_DOCS;
+                                       }
+                               }
+                       }
+                       private IndexReader reader;
+                       private bool mayUseTermDocs;
+                       
+                       internal FieldCacheDocIdSet(IndexReader reader, bool mayUseTermDocs)
+                       {
+                               this.reader = reader;
+                               this.mayUseTermDocs = mayUseTermDocs;
+                       }
+                       
+                       /// <summary>this method checks if a doc is a hit; implementations should throw an IndexOutOfRangeException when the position is invalid </summary>
+                       internal abstract bool MatchDoc(int doc);
+
+                       /// <summary>this DocIdSet is cacheable if it works solely with the FieldCache and no TermDocs </summary>
+                       public override bool IsCacheable()
+                       {
+                               return !(mayUseTermDocs && reader.HasDeletions());
+                       }
+                       
+                       public override DocIdSetIterator Iterator()
+                       {
+                               // Synchronization needed because the deleted docs BitVector
+                               // can change between the call to HasDeletions and TermDocs creation.
+                               // We only use an iterator with termDocs when this was requested (e.g. the range contains 0)
+                               // and the index has deletions.
+                               TermDocs termDocs;
+                               lock (reader)
+                               {
+                                       termDocs = IsCacheable() ? null : reader.TermDocs(null);
+                               }
+                               if (termDocs != null)
+                               {
+                                       // a DocIdSetIterator using TermDocs to iterate valid docIds
+                                       return new AnonymousClassDocIdSetIterator(termDocs, this);
+                               }
+                               else
+                               {
+                                       // a DocIdSetIterator generating docIds by incrementing a variable -
+                                       // this one can be used if there are no deletions on the index
+                                       return new AnonymousClassDocIdSetIterator1(this);
+                               }
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldCacheTermsFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldCacheTermsFilter.cs
new file mode 100644 (file)
index 0000000..9d5b885
--- /dev/null
@@ -0,0 +1,247 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using TermDocs = Mono.Lucene.Net.Index.TermDocs;
+using OpenBitSet = Mono.Lucene.Net.Util.OpenBitSet;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A {@link Filter} that only accepts documents whose single
+       /// term value in the specified field is contained in the
+       /// provided set of allowed terms.
+       /// 
+       /// <p/>
+       /// 
+       /// This is the same functionality as TermsFilter (from
+       /// contrib/queries), except this filter requires that the
+       /// field contains only a single term for all documents.
+       /// Because of drastically different implementations, they
+       /// also have different performance characteristics, as
+       /// described below.
+       /// 
+       /// <p/>
+       /// 
+       /// The first invocation of this filter on a given field will
+       /// be slower, since a {@link FieldCache.StringIndex} must be
+       /// created.  Subsequent invocations using the same field
+       /// will re-use this cache.  However, as with all
+       /// functionality based on {@link FieldCache}, persistent RAM
+       /// is consumed to hold the cache, and is not freed until the
+       /// {@link IndexReader} is closed.  In contrast, TermsFilter
+       /// has no persistent RAM consumption.
+       /// 
+       /// 
+       /// <p/>
+       /// 
+       /// With each search, this filter translates the specified
+       /// set of Terms into a private {@link OpenBitSet} keyed by
+       /// term number per unique {@link IndexReader} (normally one
+       /// reader per segment).  Then, during matching, the term
+       /// number for each docID is retrieved from the cache and
+       /// then checked for inclusion using the {@link OpenBitSet}.
+       /// Since all testing is done using RAM resident data
+       /// structures, performance should be very fast, most likely
+       /// fast enough to not require further caching of the
+       /// DocIdSet for each possible combination of terms.
+       /// However, because docIDs are simply scanned linearly, an
+       /// index with a great many small documents may find this
+       /// linear scan too costly.
+       /// 
+       /// <p/>
+       /// 
+       /// In contrast, TermsFilter builds up an {@link OpenBitSet},
+       /// keyed by docID, every time it's created, by enumerating
+       /// through all matching docs using {@link TermDocs} to seek
+       /// and scan through each term's docID list.  While there is
+       /// no linear scan of all docIDs, besides the allocation of
+       /// the underlying array in the {@link OpenBitSet}, this
+       /// approach requires a number of "disk seeks" in proportion
+       /// to the number of terms, which can be exceptionally costly
+       /// when there are cache misses in the OS's IO cache.
+       /// 
+       /// <p/>
+       /// 
+       /// Generally, this filter will be slower on the first
+       /// invocation for a given field, but subsequent invocations,
+       /// even if you change the allowed set of Terms, should be
+       /// faster than TermsFilter, especially as the number of
+       /// Terms being matched increases.  If you are matching only
+       /// a very small number of terms, and those terms in turn
+       /// match a very small number of documents, TermsFilter may
+       /// perform faster.
+       /// 
+       /// <p/>
+       /// 
+       /// Which filter is best is very application dependent.
+       /// </summary>
+       
+       [Serializable]
+       public class FieldCacheTermsFilter:Filter
+       {
+               private System.String field;
+               private System.String[] terms;
+               
+               public FieldCacheTermsFilter(System.String field, System.String[] terms)
+               {
+                       this.field = field;
+                       this.terms = terms;
+               }
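+               // Example (illustrative; the field name and terms are hypothetical):
+               // accept only documents whose single "category" term is one of the
+               // listed values.
+               //
+               //   Filter f = new FieldCacheTermsFilter("category", new System.String[] { "book", "dvd" });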
+               
+               public virtual FieldCache GetFieldCache()
+               {
+                       return Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT;
+               }
+               
+               public override DocIdSet GetDocIdSet(IndexReader reader)
+               {
+                       return new FieldCacheTermsFilterDocIdSet(this, GetFieldCache().GetStringIndex(reader, field));
+               }
+               
+               protected internal class FieldCacheTermsFilterDocIdSet:DocIdSet
+               {
+                       private void  InitBlock(FieldCacheTermsFilter enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private FieldCacheTermsFilter enclosingInstance;
+                       public FieldCacheTermsFilter Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private Mono.Lucene.Net.Search.StringIndex fcsi;
+                       
+                       private OpenBitSet openBitSet;
+                       
+                       public FieldCacheTermsFilterDocIdSet(FieldCacheTermsFilter enclosingInstance, Mono.Lucene.Net.Search.StringIndex fcsi)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.fcsi = fcsi;
+                               openBitSet = new OpenBitSet(this.fcsi.lookup.Length);
+                               for (int i = 0; i < Enclosing_Instance.terms.Length; i++)
+                               {
+                                       int termNumber = this.fcsi.BinarySearchLookup(Enclosing_Instance.terms[i]);
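+                                       // lookup[0] holds null (docs with no term), so only term
+                                       // numbers strictly greater than zero are valid matches; a
+                                       // negative result means the term is absent from the index.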
+                                       if (termNumber > 0)
+                                       {
+                                               openBitSet.FastSet(termNumber);
+                                       }
+                               }
+                       }
+                       
+                       public override DocIdSetIterator Iterator()
+                       {
+                               return new FieldCacheTermsFilterDocIdSetIterator(this);
+                       }
+
+                       /// <summary>This DocIdSet implementation is cacheable. </summary>
+                       public override bool IsCacheable()
+                       {
+                               return true;
+                       }
+                       
+                       protected internal class FieldCacheTermsFilterDocIdSetIterator:DocIdSetIterator
+                       {
+                               public FieldCacheTermsFilterDocIdSetIterator(FieldCacheTermsFilterDocIdSet enclosingInstance)
+                               {
+                                       InitBlock(enclosingInstance);
+                               }
+                               private void  InitBlock(FieldCacheTermsFilterDocIdSet enclosingInstance)
+                               {
+                                       this.enclosingInstance = enclosingInstance;
+                               }
+                               private FieldCacheTermsFilterDocIdSet enclosingInstance;
+                               public FieldCacheTermsFilterDocIdSet Enclosing_Instance
+                               {
+                                       get
+                                       {
+                                               return enclosingInstance;
+                                       }
+                                       
+                               }
+                               private int doc = - 1;
+                               
+                               /// <deprecated> use {@link #DocID()} instead. 
+                               /// </deprecated>
+                [Obsolete("use DocID() instead.")]
+                               public override int Doc()
+                               {
+                                       return doc;
+                               }
+                               
+                               public override int DocID()
+                               {
+                                       return doc;
+                               }
+                               
+                               /// <deprecated> use {@link #NextDoc()} instead. 
+                               /// </deprecated>
+                [Obsolete("use NextDoc() instead.")]
+                               public override bool Next()
+                               {
+                                       return NextDoc() != NO_MORE_DOCS;
+                               }
+                               
+                               public override int NextDoc()
+                               {
+                                       try
+                                       {
+                                               while (!Enclosing_Instance.openBitSet.FastGet(Enclosing_Instance.fcsi.order[++doc]))
+                                               {
+                                               }
+                                       }
+                                       catch (System.IndexOutOfRangeException)
+                                       {
+                                               doc = NO_MORE_DOCS;
+                                       }
+                                       return doc;
+                               }
+                               
+                               /// <deprecated> use {@link #Advance(int)} instead. 
+                               /// </deprecated>
+                [Obsolete("use Advance(int) instead.")]
+                               public override bool SkipTo(int target)
+                               {
+                                       return Advance(target) != NO_MORE_DOCS;
+                               }
+                               
+                               public override int Advance(int target)
+                               {
+                                       try
+                                       {
+                                               doc = target;
+                                               while (!Enclosing_Instance.openBitSet.FastGet(Enclosing_Instance.fcsi.order[doc]))
+                                               {
+                                                       doc++;
+                                               }
+                                       }
+                                       catch (System.IndexOutOfRangeException)
+                                       {
+                                               doc = NO_MORE_DOCS;
+                                       }
+                                       return doc;
+                               }
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldComparator.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldComparator.cs
new file mode 100644 (file)
index 0000000..8cd937b
--- /dev/null
@@ -0,0 +1,1066 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using ByteParser = Mono.Lucene.Net.Search.ByteParser;
+using DoubleParser = Mono.Lucene.Net.Search.DoubleParser;
+using FloatParser = Mono.Lucene.Net.Search.FloatParser;
+using IntParser = Mono.Lucene.Net.Search.IntParser;
+using LongParser = Mono.Lucene.Net.Search.LongParser;
+using ShortParser = Mono.Lucene.Net.Search.ShortParser;
+using StringIndex = Mono.Lucene.Net.Search.StringIndex;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Expert: a FieldComparator compares hits so as to determine their
+       /// sort order when collecting the top results with {@link
+       /// TopFieldCollector}.  The concrete public FieldComparator
+       /// classes here correspond to the SortField types.
+       /// 
+       /// <p/>This API is designed to achieve high performance
+       /// sorting, by exposing a tight interaction with {@link
+       /// FieldValueHitQueue} as it visits hits.  Whenever a hit is
+       /// competitive, it's enrolled into a virtual slot, which is
+       /// an int ranging from 0 to numHits-1.  The {@link
+       /// FieldComparator} is made aware of segment transitions
+       /// during searching in case any internal state it's tracking
+       /// needs to be recomputed during these transitions.<p/>
+       /// 
+       /// <p/>A comparator must define these functions:<p/>
+       /// 
+       /// <ul>
+       /// 
+       /// <li> {@link #compare} Compare a hit at 'slot a'
+       /// with hit 'slot b'.</li>
+       /// 
+       /// <li> {@link #setBottom} This method is called by
+       /// {@link FieldValueHitQueue} to notify the
+       /// FieldComparator of the current weakest ("bottom")
+       /// slot.  Note that this slot may not hold the weakest
+       /// value according to your comparator, in cases where
+       /// your comparator is not the primary one (i.e., is only
+       /// used to break ties from the comparators before it).</li>
+       /// 
+       /// <li> {@link #compareBottom} Compare a new hit (docID)
+       /// against the "weakest" (bottom) entry in the queue.</li>
+       /// 
+       /// <li> {@link #copy} Installs a new hit into the
+       /// priority queue.  The {@link FieldValueHitQueue}
+       /// calls this method when a new hit is competitive.</li>
+       /// 
+       /// <li> {@link #setNextReader} Invoked
+       /// when the search is switching to the next segment.
+       /// You may need to update internal state of the
+       /// comparator, for example retrieving new values from
+       /// the {@link FieldCache}.</li>
+       /// 
+       /// <li> {@link #value} Return the sort value stored in
+       /// the specified slot.  This is only called at the end
+       /// of the search, in order to populate {@link
+       /// FieldDoc#fields} when returning the top results.</li>
+       /// </ul>
+       /// 
+       /// <b>NOTE:</b> This API is experimental and might change in
+       /// incompatible ways in the next release.
+       /// </summary>
+       public abstract class FieldComparator
+       {
+               
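+               // Illustrative sketch of the contract above; "CachedIntComparator" and
+               // the field name are hypothetical (cf. IntComparator below). One slot
+               // per competitive hit, per-segment state refreshed in SetNextReader.
+               //
+               //   internal sealed class CachedIntComparator : FieldComparator
+               //   {
+               //       private readonly int[] values;  // one slot per competitive hit
+               //       private int[] current;          // values for the current segment
+               //       private int bottom;             // weakest value in the queue
+               //       internal CachedIntComparator(int numHits) { values = new int[numHits]; }
+               //       public override int Compare(int slot1, int slot2) { return values[slot1].CompareTo(values[slot2]); }
+               //       public override int CompareBottom(int doc) { return bottom.CompareTo(current[doc]); }
+               //       public override void Copy(int slot, int doc) { values[slot] = current[doc]; }
+               //       public override void SetNextReader(IndexReader reader, int docBase) { current = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetInts(reader, "myField"); }
+               //       public override void SetBottom(int slot) { bottom = values[slot]; }
+               //       public override System.IComparable Value(int slot) { return (System.Int32) values[slot]; }
+               //   }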
+               /// <summary>Parses field's values as byte (using {@link
+               /// FieldCache#getBytes}) and sorts by ascending value 
+               /// </summary>
+               public sealed class ByteComparator:FieldComparator
+               {
+                       private sbyte[] values;
+                       private sbyte[] currentReaderValues;
+                       private System.String field;
+                       private ByteParser parser;
+                       private sbyte bottom;
+                       
+                       internal ByteComparator(int numHits, System.String field, Mono.Lucene.Net.Search.Parser parser)
+                       {
+                               values = new sbyte[numHits];
+                               this.field = field;
+                               this.parser = (ByteParser) parser;
+                       }
+                       
+                       public override int Compare(int slot1, int slot2)
+                       {
+                               return values[slot1] - values[slot2];
+                       }
+                       
+                       public override int CompareBottom(int doc)
+                       {
+                               return bottom - currentReaderValues[doc];
+                       }
+                       
+                       public override void  Copy(int slot, int doc)
+                       {
+                               values[slot] = currentReaderValues[doc];
+                       }
+                       
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               currentReaderValues = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetBytes(reader, field, parser);
+                       }
+                       
+                       public override void  SetBottom(int bottom)
+                       {
+                               this.bottom = values[bottom];
+                       }
+                       
+                       public override System.IComparable Value(int slot)
+                       {
+                               return (sbyte) values[slot];
+                       }
+               }
+               
+               /// <summary>Sorts by ascending docID </summary>
+               public sealed class DocComparator:FieldComparator
+               {
+                       private int[] docIDs;
+                       private int docBase;
+                       private int bottom;
+                       
+                       internal DocComparator(int numHits)
+                       {
+                               docIDs = new int[numHits];
+                       }
+                       
+                       public override int Compare(int slot1, int slot2)
+                       {
+                               // No overflow risk because docIDs are non-negative
+                               return docIDs[slot1] - docIDs[slot2];
+                       }
+                       
+                       public override int CompareBottom(int doc)
+                       {
+                               // No overflow risk because docIDs are non-negative
+                               return bottom - (docBase + doc);
+                       }
+                       
+                       public override void  Copy(int slot, int doc)
+                       {
+                               docIDs[slot] = docBase + doc;
+                       }
+                       
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               // TODO: can we "map" our docIDs to the current
+                               // reader? saves having to then subtract on every
+                               // compare call
+                               this.docBase = docBase;
+                       }
+                       
+                       public override void  SetBottom(int bottom)
+                       {
+                               this.bottom = docIDs[bottom];
+                       }
+                       
+                       public override System.IComparable Value(int slot)
+                       {
+                               return (System.Int32) docIDs[slot];
+                       }
+               }
+               
+               /// <summary>Parses field's values as double (using {@link
+               /// FieldCache#getDoubles}) and sorts by ascending value 
+               /// </summary>
+               public sealed class DoubleComparator:FieldComparator
+               {
+                       private double[] values;
+                       private double[] currentReaderValues;
+                       private System.String field;
+                       private DoubleParser parser;
+                       private double bottom;
+                       
+                       internal DoubleComparator(int numHits, System.String field, Mono.Lucene.Net.Search.Parser parser)
+                       {
+                               values = new double[numHits];
+                               this.field = field;
+                               this.parser = (DoubleParser) parser;
+                       }
+                       
+                       public override int Compare(int slot1, int slot2)
+                       {
+                               double v1 = values[slot1];
+                               double v2 = values[slot2];
+                               if (v1 > v2)
+                               {
+                                       return 1;
+                               }
+                               else if (v1 < v2)
+                               {
+                                       return - 1;
+                               }
+                               else
+                               {
+                                       return 0;
+                               }
+                       }
+                       
+                       public override int CompareBottom(int doc)
+                       {
+                               double v2 = currentReaderValues[doc];
+                               if (bottom > v2)
+                               {
+                                       return 1;
+                               }
+                               else if (bottom < v2)
+                               {
+                                       return - 1;
+                               }
+                               else
+                               {
+                                       return 0;
+                               }
+                       }
+                       
+                       public override void  Copy(int slot, int doc)
+                       {
+                               values[slot] = currentReaderValues[doc];
+                       }
+                       
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               currentReaderValues = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetDoubles(reader, field, parser);
+                       }
+                       
+                       public override void  SetBottom(int bottom)
+                       {
+                               this.bottom = values[bottom];
+                       }
+                       
+                       public override System.IComparable Value(int slot)
+                       {
+                               return (double) values[slot];
+                       }
+               }
+               
+               /// <summary>Parses field's values as float (using {@link
+               /// FieldCache#getFloats}) and sorts by ascending value 
+               /// </summary>
+               public sealed class FloatComparator:FieldComparator
+               {
+                       private float[] values;
+                       private float[] currentReaderValues;
+                       private System.String field;
+                       private FloatParser parser;
+                       private float bottom;
+                       
+                       internal FloatComparator(int numHits, System.String field, Mono.Lucene.Net.Search.Parser parser)
+                       {
+                               values = new float[numHits];
+                               this.field = field;
+                               this.parser = (FloatParser) parser;
+                       }
+                       
+                       public override int Compare(int slot1, int slot2)
+                       {
+                               // TODO: are there sneaky non-branch ways to compute
+                               // sign of float?
+                               float v1 = values[slot1];
+                               float v2 = values[slot2];
+                               if (v1 > v2)
+                               {
+                                       return 1;
+                               }
+                               else if (v1 < v2)
+                               {
+                                       return - 1;
+                               }
+                               else
+                               {
+                                       return 0;
+                               }
+                       }
+                       
+                       public override int CompareBottom(int doc)
+                       {
+                               // TODO: are there sneaky non-branch ways to compute
+                               // sign of float?
+                               float v2 = currentReaderValues[doc];
+                               if (bottom > v2)
+                               {
+                                       return 1;
+                               }
+                               else if (bottom < v2)
+                               {
+                                       return - 1;
+                               }
+                               else
+                               {
+                                       return 0;
+                               }
+                       }
+                       
+                       public override void  Copy(int slot, int doc)
+                       {
+                               values[slot] = currentReaderValues[doc];
+                       }
+                       
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               currentReaderValues = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetFloats(reader, field, parser);
+                       }
+                       
+                       public override void  SetBottom(int bottom)
+                       {
+                               this.bottom = values[bottom];
+                       }
+                       
+                       public override System.IComparable Value(int slot)
+                       {
+                               return (float) values[slot];
+                       }
+               }
+               
+               /// <summary>Parses field's values as int (using {@link
+               /// FieldCache#getInts}) and sorts by ascending value 
+               /// </summary>
+               public sealed class IntComparator:FieldComparator
+               {
+                       private int[] values;
+                       private int[] currentReaderValues;
+                       private System.String field;
+                       private IntParser parser;
+                       private int bottom; // Value of bottom of queue
+                       
+                       internal IntComparator(int numHits, System.String field, Mono.Lucene.Net.Search.Parser parser)
+                       {
+                               values = new int[numHits];
+                               this.field = field;
+                               this.parser = (IntParser) parser;
+                       }
+                       
+                       public override int Compare(int slot1, int slot2)
+                       {
+                               // TODO: there are sneaky non-branch ways to compute
+                               // -1/+1/0 sign
+                               // Cannot return values[slot1] - values[slot2] because that
+                               // may overflow
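+                               // (e.g. int.MinValue - 1 wraps to int.MaxValue in unchecked C#,
+                               // which would report the smaller value as the larger one)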
+                               int v1 = values[slot1];
+                               int v2 = values[slot2];
+                               if (v1 > v2)
+                               {
+                                       return 1;
+                               }
+                               else if (v1 < v2)
+                               {
+                                       return - 1;
+                               }
+                               else
+                               {
+                                       return 0;
+                               }
+                       }
+                       
+                       public override int CompareBottom(int doc)
+                       {
+                               // TODO: there are sneaky non-branch ways to compute
+                               // -1/+1/0 sign
+                               // Cannot return bottom - values[slot2] because that
+                               // may overflow
+                               int v2 = currentReaderValues[doc];
+                               if (bottom > v2)
+                               {
+                                       return 1;
+                               }
+                               else if (bottom < v2)
+                               {
+                                       return - 1;
+                               }
+                               else
+                               {
+                                       return 0;
+                               }
+                       }
+                       
+                       public override void  Copy(int slot, int doc)
+                       {
+                               values[slot] = currentReaderValues[doc];
+                       }
+                       
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               currentReaderValues = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetInts(reader, field, parser);
+                       }
+                       
+                       public override void  SetBottom(int bottom)
+                       {
+                               this.bottom = values[bottom];
+                       }
+                       
+                       public override System.IComparable Value(int slot)
+                       {
+                               return (System.Int32) values[slot];
+                       }
+               }
+               
+               /// <summary>Parses field's values as long (using {@link
+               /// FieldCache#getLongs}) and sorts by ascending value 
+               /// </summary>
+               public sealed class LongComparator:FieldComparator
+               {
+                       private long[] values;
+                       private long[] currentReaderValues;
+                       private System.String field;
+                       private LongParser parser;
+                       private long bottom;
+                       
+                       internal LongComparator(int numHits, System.String field, Mono.Lucene.Net.Search.Parser parser)
+                       {
+                               values = new long[numHits];
+                               this.field = field;
+                               this.parser = (LongParser) parser;
+                       }
+                       
+                       public override int Compare(int slot1, int slot2)
+                       {
+                               // TODO: there are sneaky non-branch ways to compute
+                               // -1/+1/0 sign
+                               long v1 = values[slot1];
+                               long v2 = values[slot2];
+                               if (v1 > v2)
+                               {
+                                       return 1;
+                               }
+                               else if (v1 < v2)
+                               {
+                                       return - 1;
+                               }
+                               else
+                               {
+                                       return 0;
+                               }
+                       }
+                       
+                       public override int CompareBottom(int doc)
+                       {
+                               // TODO: there are sneaky non-branch ways to compute
+                               // -1/+1/0 sign
+                               long v2 = currentReaderValues[doc];
+                               if (bottom > v2)
+                               {
+                                       return 1;
+                               }
+                               else if (bottom < v2)
+                               {
+                                       return - 1;
+                               }
+                               else
+                               {
+                                       return 0;
+                               }
+                       }
+                       
+                       public override void  Copy(int slot, int doc)
+                       {
+                               values[slot] = currentReaderValues[doc];
+                       }
+                       
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               currentReaderValues = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetLongs(reader, field, parser);
+                       }
+                       
+                       public override void  SetBottom(int bottom)
+                       {
+                               this.bottom = values[bottom];
+                       }
+                       
+                       public override System.IComparable Value(int slot)
+                       {
+                               return (long) values[slot];
+                       }
+               }
+               
+               /// <summary>Sorts by descending relevance.  NOTE: if you are
+               /// sorting only by descending relevance and then
+               /// secondarily by ascending docID, performance is faster
+               /// using {@link TopScoreDocCollector} directly (which {@link
+               /// IndexSearcher#search} uses when no {@link Sort} is
+               /// specified). 
+               /// </summary>
+               public sealed class RelevanceComparator:FieldComparator
+               {
+                       private float[] scores;
+                       private float bottom;
+                       private Scorer scorer;
+                       
+                       internal RelevanceComparator(int numHits)
+                       {
+                               scores = new float[numHits];
+                       }
+                       
+                       public override int Compare(int slot1, int slot2)
+                       {
+                               float score1 = scores[slot1];
+                               float score2 = scores[slot2];
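+                               // Relevance sorts descending (see the class summary), so this comparison is
+                               // inverted relative to the ascending numeric comparators above.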
+                               return score1 > score2 ? -1 : (score1 < score2 ? 1 : 0);
+                       }
+                       
+                       public override int CompareBottom(int doc)
+                       {
+                               float score = scorer.Score();
+                               return bottom > score ? -1 : (bottom < score ? 1 : 0);
+                       }
+                       
+                       public override void  Copy(int slot, int doc)
+                       {
+                               scores[slot] = scorer.Score();
+                       }
+                       
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                       }
+                       
+                       public override void  SetBottom(int bottom)
+                       {
+                               this.bottom = scores[bottom];
+                       }
+                       
+                       public override void  SetScorer(Scorer scorer)
+                       {
+                               // wrap with a ScoreCachingWrappingScorer so that successive calls to
+                               // score() will not incur score computation over and over again.
+                               this.scorer = new ScoreCachingWrappingScorer(scorer);
+                       }
+                       
+                       public override System.IComparable Value(int slot)
+                       {
+                               return (float) scores[slot];
+                       }
+               }
+               
+               /// <summary>Parses field's values as short (using {@link
+               /// FieldCache#getShorts}) and sorts by ascending value.
+               /// </summary>
+               public sealed class ShortComparator:FieldComparator
+               {
+                       private short[] values;
+                       private short[] currentReaderValues;
+                       private System.String field;
+                       private ShortParser parser;
+                       private short bottom;
+                       
+                       internal ShortComparator(int numHits, System.String field, Mono.Lucene.Net.Search.Parser parser)
+                       {
+                               values = new short[numHits];
+                               this.field = field;
+                               this.parser = (ShortParser) parser;
+                       }
+                       
+                       public override int Compare(int slot1, int slot2)
+                       {
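+                               // Plain subtraction is safe here: short operands are promoted to int,
+                               // so the difference cannot overflow (unlike the int/long comparators above).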
+                               return values[slot1] - values[slot2];
+                       }
+                       
+                       public override int CompareBottom(int doc)
+                       {
+                               return bottom - currentReaderValues[doc];
+                       }
+                       
+                       public override void  Copy(int slot, int doc)
+                       {
+                               values[slot] = currentReaderValues[doc];
+                       }
+                       
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               currentReaderValues = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetShorts(reader, field, parser);
+                       }
+                       
+                       public override void  SetBottom(int bottom)
+                       {
+                               this.bottom = values[bottom];
+                       }
+                       
+                       public override System.IComparable Value(int slot)
+                       {
+                               return (short) values[slot];
+                       }
+               }
+               
+               /// <summary>Sorts by a field's value using the Collator for a
+               /// given Locale.
+               /// </summary>
+               public sealed class StringComparatorLocale:FieldComparator
+               {
+                       
+                       private System.String[] values;
+                       private System.String[] currentReaderValues;
+                       private System.String field;
+                       internal System.Globalization.CompareInfo collator;
+                       private System.String bottom;
+                       
+                       internal StringComparatorLocale(int numHits, System.String field, System.Globalization.CultureInfo locale)
+                       {
+                               values = new System.String[numHits];
+                               this.field = field;
+                               collator = locale.CompareInfo;
+                       }
+                       
+                       public override int Compare(int slot1, int slot2)
+                       {
+                               System.String val1 = values[slot1];
+                               System.String val2 = values[slot2];
+                               if (val1 == null)
+                               {
+                                       if (val2 == null)
+                                       {
+                                               return 0;
+                                       }
+                                       return - 1;
+                               }
+                               else if (val2 == null)
+                               {
+                                       return 1;
+                               }
+                               return collator.Compare(val1.ToString(), val2.ToString());
+                       }
+                       
+                       public override int CompareBottom(int doc)
+                       {
+                               System.String val2 = currentReaderValues[doc];
+                               if (bottom == null)
+                               {
+                                       if (val2 == null)
+                                       {
+                                               return 0;
+                                       }
+                                       return - 1;
+                               }
+                               else if (val2 == null)
+                               {
+                                       return 1;
+                               }
+                               return collator.Compare(bottom.ToString(), val2.ToString());
+                       }
+                       
+                       public override void  Copy(int slot, int doc)
+                       {
+                               values[slot] = currentReaderValues[doc];
+                       }
+                       
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               currentReaderValues = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetStrings(reader, field);
+                       }
+                       
+                       public override void  SetBottom(int bottom)
+                       {
+                               this.bottom = values[bottom];
+                       }
+                       
+                       public override System.IComparable Value(int slot)
+                       {
+                               return values[slot];
+                       }
+               }
+               
+               /// <summary>Sorts by field's natural String sort order, using
+               /// ordinals.  This is functionally equivalent to {@link
+               /// StringValComparator}, but it first resolves the strings
+               /// to their relative ordinal positions (using the index
+               /// returned by {@link FieldCache#getStringIndex}), and
+               /// does most comparisons using the ordinals.  For medium
+               /// to large results, this comparator will be much faster
+               /// than {@link StringValComparator}.  For very small
+               /// result sets it may be slower. 
+               /// </summary>
+               public sealed class StringOrdValComparator:FieldComparator
+               {
+                       
+                       private int[] ords;
+                       private System.String[] values;
+                       private int[] readerGen;
+                       
+                       private int currentReaderGen = - 1;
+                       private System.String[] lookup;
+                       private int[] order;
+                       private System.String field;
+                       
+                       private int bottomSlot = - 1;
+                       private int bottomOrd;
+                       private System.String bottomValue;
+                       private bool reversed;
+                       private int sortPos;
+                       
+                       public StringOrdValComparator(int numHits, System.String field, int sortPos, bool reversed)
+                       {
+                               ords = new int[numHits];
+                               values = new System.String[numHits];
+                               readerGen = new int[numHits];
+                               this.sortPos = sortPos;
+                               this.reversed = reversed;
+                               this.field = field;
+                       }
+                       
+                       public override int Compare(int slot1, int slot2)
+                       {
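+                               // Ordinals are only comparable when both slots were filled from the same
+                               // reader (tracked in readerGen); otherwise fall through to comparing the
+                               // string values themselves.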
+                               if (readerGen[slot1] == readerGen[slot2])
+                               {
+                                       int cmp = ords[slot1] - ords[slot2];
+                                       if (cmp != 0)
+                                       {
+                                               return cmp;
+                                       }
+                               }
+                               
+                               System.String val1 = values[slot1];
+                               System.String val2 = values[slot2];
+                               if (val1 == null)
+                               {
+                                       if (val2 == null)
+                                       {
+                                               return 0;
+                                       }
+                                       return - 1;
+                               }
+                               else if (val2 == null)
+                               {
+                                       return 1;
+                               }
+                               return String.CompareOrdinal(val1, val2);
+                       }
+                       
+                       public override int CompareBottom(int doc)
+                       {
+                               System.Diagnostics.Debug.Assert(bottomSlot != - 1);
+                               int order = this.order[doc];
+                               int cmp = bottomOrd - order;
+                               if (cmp != 0)
+                               {
+                                       return cmp;
+                               }
+                               
+                               System.String val2 = lookup[order];
+                               if (bottomValue == null)
+                               {
+                                       if (val2 == null)
+                                       {
+                                               return 0;
+                                       }
+                                       // bottom wins
+                                       return - 1;
+                               }
+                               else if (val2 == null)
+                               {
+                                       // doc wins
+                                       return 1;
+                               }
+                               return String.CompareOrdinal(bottomValue, val2);
+                       }
+                       
+                       private void  Convert(int slot)
+                       {
+                               readerGen[slot] = currentReaderGen;
+                               int index = 0;
+                               System.String value_Renamed = values[slot];
+                               if (value_Renamed == null)
+                               {
+                                       ords[slot] = 0;
+                                       return ;
+                               }
+                               
+                               if (sortPos == 0 && bottomSlot != - 1 && bottomSlot != slot)
+                               {
+                                       // Since we are the primary sort, the entries in the
+                                       // queue are bounded by bottomOrd:
+                                       System.Diagnostics.Debug.Assert(bottomOrd < lookup.Length);
+                                       if (reversed)
+                                       {
+                                               index = BinarySearch(lookup, value_Renamed, bottomOrd, lookup.Length - 1);
+                                       }
+                                       else
+                                       {
+                                               index = BinarySearch(lookup, value_Renamed, 0, bottomOrd);
+                                       }
+                               }
+                               else
+                               {
+                                       // Full binary search
+                                       index = BinarySearch(lookup, value_Renamed);
+                               }
+                               
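+                               // BinarySearch returns -(insertionPoint + 1) when the key is absent, so
+                               // -index - 2 yields the ord of the greatest value below the key.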
+                               if (index < 0)
+                               {
+                                       index = - index - 2;
+                               }
+                               ords[slot] = index;
+                       }
+                       
+                       public override void  Copy(int slot, int doc)
+                       {
+                               int ord = order[doc];
+                               ords[slot] = ord;
+                               System.Diagnostics.Debug.Assert(ord >= 0);
+                               values[slot] = lookup[ord];
+                               readerGen[slot] = currentReaderGen;
+                       }
+                       
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               StringIndex currentReaderValues = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetStringIndex(reader, field);
+                               currentReaderGen++;
+                               order = currentReaderValues.order;
+                               lookup = currentReaderValues.lookup;
+                               System.Diagnostics.Debug.Assert(lookup.Length > 0);
+                               if (bottomSlot != - 1)
+                               {
+                                       Convert(bottomSlot);
+                                       bottomOrd = ords[bottomSlot];
+                               }
+                       }
+                       
+                       public override void  SetBottom(int bottom)
+                       {
+                               bottomSlot = bottom;
+                               if (readerGen[bottom] != currentReaderGen)
+                               {
+                                       Convert(bottomSlot);
+                               }
+                               bottomOrd = ords[bottom];
+                               System.Diagnostics.Debug.Assert(bottomOrd >= 0);
+                               System.Diagnostics.Debug.Assert(bottomOrd < lookup.Length);
+                               bottomValue = values[bottom];
+                       }
+                       
+                       public override System.IComparable Value(int slot)
+                       {
+                               return values[slot];
+                       }
+                       
+                       public System.String[] GetValues()
+                       {
+                               return values;
+                       }
+                       
+                       public int GetBottomSlot()
+                       {
+                               return bottomSlot;
+                       }
+                       
+                       public System.String GetField()
+                       {
+                               return field;
+                       }
+               }
+               
+               /// <summary>Sorts by field's natural String sort order.  All
+               /// comparisons are done using String.CompareOrdinal, which is
+               /// slow for medium to large result sets but possibly
+               /// very fast for very small result sets. 
+               /// </summary>
+               public sealed class StringValComparator:FieldComparator
+               {
+                       
+                       private System.String[] values;
+                       private System.String[] currentReaderValues;
+                       private System.String field;
+                       private System.String bottom;
+                       
+                       internal StringValComparator(int numHits, System.String field)
+                       {
+                               values = new System.String[numHits];
+                               this.field = field;
+                       }
+                       
+                       public override int Compare(int slot1, int slot2)
+                       {
+                               System.String val1 = values[slot1];
+                               System.String val2 = values[slot2];
+                               if (val1 == null)
+                               {
+                                       if (val2 == null)
+                                       {
+                                               return 0;
+                                       }
+                                       return - 1;
+                               }
+                               else if (val2 == null)
+                               {
+                                       return 1;
+                               }
+                               
+                               return String.CompareOrdinal(val1, val2);
+                       }
+                       
+                       public override int CompareBottom(int doc)
+                       {
+                               System.String val2 = currentReaderValues[doc];
+                               if (bottom == null)
+                               {
+                                       if (val2 == null)
+                                       {
+                                               return 0;
+                                       }
+                                       return - 1;
+                               }
+                               else if (val2 == null)
+                               {
+                                       return 1;
+                               }
+                               return String.CompareOrdinal(bottom, val2);
+                       }
+                       
+                       public override void  Copy(int slot, int doc)
+                       {
+                               values[slot] = currentReaderValues[doc];
+                       }
+                       
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               currentReaderValues = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetStrings(reader, field);
+                       }
+                       
+                       public override void  SetBottom(int bottom)
+                       {
+                               this.bottom = values[bottom];
+                       }
+                       
+                       public override System.IComparable Value(int slot)
+                       {
+                               return values[slot];
+                       }
+               }
+               
+               protected internal static int BinarySearch(System.String[] a, System.String key)
+               {
+                       return BinarySearch(a, key, 0, a.Length - 1);
+               }
+               
+               protected internal static int BinarySearch(System.String[] a, System.String key, int low, int high)
+               {
+                       
+                       while (low <= high)
+                       {
+                               int mid = SupportClass.Number.URShift((low + high), 1);
+                               System.String midVal = a[mid];
+                               int cmp;
+                               if (midVal != null)
+                               {
+                                       cmp = String.CompareOrdinal(midVal, key);
+                               }
+                               else
+                               {
+                                       cmp = - 1;
+                               }
+                               
+                               if (cmp < 0)
+                                       low = mid + 1;
+                               else if (cmp > 0)
+                                       high = mid - 1;
+                               else
+                                       return mid;
+                       }
+                       return - (low + 1);
+               }
+               
+               /// <summary> Compare hit at slot1 with hit at slot2.
+               /// 
+               /// </summary>
+               /// <param name="slot1">first slot to compare
+               /// </param>
+               /// <param name="slot2">second slot to compare
+               /// </param>
+               /// <returns> any N &lt; 0 if slot2's value is sorted after
+               /// slot1, any N &gt; 0 if slot2's value is sorted before
+               /// slot1, and 0 if they are equal
+               /// </returns>
+               public abstract int Compare(int slot1, int slot2);
+               
+               /// <summary> Set the bottom slot, i.e. the "weakest" (sorted last)
+               /// entry in the queue.  When {@link #compareBottom} is
+               /// called, you should compare against this slot.  This
+               /// will always be called before {@link #compareBottom}.
+               /// 
+               /// </summary>
+               /// <param name="slot">the currently weakest (sorted last) slot in the queue
+               /// </param>
+               public abstract void  SetBottom(int slot);
+               
+               /// <summary> Compare the bottom of the queue with doc.  This will
+               /// only be invoked after setBottom has been called.  This
+               /// should return the same result as {@link
+               /// #Compare(int,int)} as if bottom were slot1 and the new
+               /// document were slot2.
+               /// 
+               /// <p/>For a search that hits many results, this method
+               /// will be the hotspot (invoked by far the most
+               /// frequently).<p/>
+               /// 
+               /// </summary>
+               /// <param name="doc">that was hit
+               /// </param>
+               /// <returns> any N &lt; 0 if the doc's value is sorted after
+               /// the bottom entry (not competitive), any N &gt; 0 if the
+               /// doc's value is sorted before the bottom entry, and 0 if
+               /// they are equal.
+               /// </returns>
+               public abstract int CompareBottom(int doc);
+               
+               /// <summary> This method is called when a new hit is competitive.
+               /// You should copy any state associated with this document
+               /// that will be required for future comparisons, into the
+               /// specified slot.
+               /// 
+               /// </summary>
+               /// <param name="slot">which slot to copy the hit to
+               /// </param>
+               /// <param name="doc">docID relative to current reader
+               /// </param>
+               public abstract void  Copy(int slot, int doc);
+               
+               /// <summary> Set a new reader. All subsequent docIDs correspond to the current reader.
+               /// 
+               /// </summary>
+               /// <param name="reader">current reader
+               /// </param>
+               /// <param name="docBase">docBase of this reader 
+               /// </param>
+               /// <throws>  IOException </throws>
+               public abstract void  SetNextReader(IndexReader reader, int docBase);
+               
+               /// <summary>Sets the Scorer to use in case a document's score is
+               /// needed.
+               /// 
+               /// </summary>
+               /// <param name="scorer">Scorer instance that you should use to
+               /// obtain the current hit's score, if necessary. 
+               /// </param>
+               public virtual void  SetScorer(Scorer scorer)
+               {
+                       // Empty implementation since most comparators don't need the score. This
+                       // can be overridden by those that need it.
+               }
+               
+               /// <summary> Return the actual value in the slot.
+               /// 
+               /// </summary>
+               /// <param name="slot">the value
+               /// </param>
+               /// <returns> value in this slot upgraded to Comparable
+               /// </returns>
+               public abstract System.IComparable Value(int slot);
+       }
+}
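
A minimal sketch of the comparator contract documented above: a descending-int comparator. The class name is illustrative, and the parserless GetInts overload of this port's FieldCache is assumed; everything else mirrors the bundled IntComparator.

// Illustrative sketch only, not part of this commit.
public sealed class DescendingIntComparator : FieldComparator
{
        private int[] values;                 // one slot per competitive hit
        private int[] currentReaderValues;    // FieldCache values for the current reader
        private System.String field;
        private int bottom;                   // value of the weakest queued hit

        public DescendingIntComparator(int numHits, System.String field)
        {
                values = new int[numHits];
                this.field = field;
        }

        public override int Compare(int slot1, int slot2)
        {
                // Descending: larger values sort first.
                int v1 = values[slot1];
                int v2 = values[slot2];
                return v1 > v2 ? -1 : (v1 < v2 ? 1 : 0);
        }

        public override int CompareBottom(int doc)
        {
                int v2 = currentReaderValues[doc];
                return bottom > v2 ? -1 : (bottom < v2 ? 1 : 0);
        }

        public override void Copy(int slot, int doc)
        {
                values[slot] = currentReaderValues[doc];
        }

        public override void SetNextReader(IndexReader reader, int docBase)
        {
                // Assumes the two-argument GetInts overload of this port's FieldCache.
                currentReaderValues = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetInts(reader, field);
        }

        public override void SetBottom(int slot)
        {
                bottom = values[slot];
        }

        public override System.IComparable Value(int slot)
        {
                return (System.Int32) values[slot];
        }
}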
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldComparatorSource.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldComparatorSource.cs
new file mode 100644 (file)
index 0000000..670116f
--- /dev/null
@@ -0,0 +1,45 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Provides a {@link FieldComparator} for custom field sorting.
+       /// 
+       /// <b>NOTE:</b> This API is experimental and might change in
+       /// incompatible ways in the next release.
+       /// 
+       /// </summary>
+       [Serializable]
+       public abstract class FieldComparatorSource
+       {
+               
+               /// <summary> Creates a comparator for the field in the given index.
+               /// 
+               /// </summary>
+               /// <param name="fieldname">Name of the field to create comparator for.
+               /// </param>
+               /// <returns> FieldComparator.
+               /// </returns>
+               /// <throws>  IOException If an error occurs reading the index. </throws>
+               public abstract FieldComparator NewComparator(System.String fieldname, int numHits, int sortPos, bool reversed);
+       }
+}
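
A sketch of how a FieldComparatorSource plugs into sorting, assuming the SortField(field, FieldComparatorSource) constructor from the Lucene 2.9 API and reusing the illustrative DescendingIntComparator sketched above.

// Illustrative sketch only: wires a custom comparator into a Sort.
[Serializable]
public class DescendingIntComparatorSource : FieldComparatorSource
{
        public override FieldComparator NewComparator(System.String fieldname, int numHits, int sortPos, bool reversed)
        {
                // numHits sizes the comparator's slot array; sortPos and reversed are
                // available for ord-based optimizations and are ignored here.
                return new DescendingIntComparator(numHits, fieldname);
        }
}

// Usage (the field name "price" is made up):
// Sort sort = new Sort(new SortField("price", new DescendingIntComparatorSource()));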
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldDoc.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldDoc.cs
new file mode 100644 (file)
index 0000000..93308d7
--- /dev/null
@@ -0,0 +1,119 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Expert: A ScoreDoc which also contains information about
+       /// how to sort the referenced document.  In addition to the
+       /// document number and score, this object contains an array
+       /// of values for the document from the field(s) used to sort.
+       /// For example, if the sort criteria was to sort by fields
+       /// "a", "b" then "c", the <code>fields</code> object array
+       /// will have three elements, corresponding respectively to
+       /// the term values for the document in fields "a", "b" and "c".
+       /// The class of each element in the array will be either
+       /// Integer, Float or String depending on the type of values
+       /// in the terms of each field.
+       /// 
+       /// <p/>Created: Feb 11, 2004 1:23:38 PM
+       /// 
+       /// </summary>
+       /// <since>   lucene 1.4
+       /// </since>
+       /// <version>  $Id: FieldDoc.java 773194 2009-05-09 10:36:41Z mikemccand $
+       /// </version>
+       /// <seealso cref="ScoreDoc">
+       /// </seealso>
+       /// <seealso cref="TopFieldDocs">
+       /// </seealso>
+       [Serializable]
+       public class FieldDoc:ScoreDoc
+       {
+               
+               /// <summary>Expert: The values which are used to sort the referenced document.
+               /// The order of these will match the original sort criteria given by a
+               /// Sort object.  Each Object will be either an Integer, Float or String,
+               /// depending on the type of values in the terms of the original field.
+               /// </summary>
+               /// <seealso cref="Sort">
+               /// </seealso>
+               /// <seealso cref="Searcher.Search(Query,Filter,int,Sort)">
+               /// </seealso>
+        [NonSerialized]
+               public System.IComparable[] fields;
+               
+               /// <summary>Expert: Creates one of these objects with empty sort information. </summary>
+               public FieldDoc(int doc, float score):base(doc, score)
+               {
+               }
+               
+               /// <summary>Expert: Creates one of these objects with the given sort information. </summary>
+               public FieldDoc(int doc, float score, System.IComparable[] fields):base(doc, score)
+               {
+                       this.fields = fields;
+               }
+               
+               // A convenience method for debugging.
+               public override System.String ToString()
+               {
+                       // super.toString returns the doc and score information, so just add the
+                       // fields information
+                       System.Text.StringBuilder sb = new System.Text.StringBuilder(base.ToString());
+                       sb.Append("[");
+                       for (int i = 0; i < fields.Length; i++)
+                       {
+                               sb.Append(fields[i]).Append(", ");
+                       }
+                       sb.Length -= 2; // discard last ", "
+                       sb.Append("]");
+                       return sb.ToString();
+               }
+
+        #region SERIALIZATION
+        internal object[] fieldsClone = null;
+
+        [System.Runtime.Serialization.OnSerializing]
+        void OnSerializing(System.Runtime.Serialization.StreamingContext context)
+        {
+            if (fields == null) return;
+
+            // Copy "fields" to "fieldsClone"
+            fieldsClone = new object[fields.Length];
+            for (int i = 0; i < fields.Length; i++)
+            {
+                fieldsClone[i] = fields[i];
+            }
+        }
+
+        [System.Runtime.Serialization.OnDeserialized]
+        void OnDeserialized(System.Runtime.Serialization.StreamingContext context)
+        {
+            if (fieldsClone == null) return;
+
+            // Form "fields" from "fieldsClone"
+            fields = new IComparable[fieldsClone.Length];
+            for (int i = 0; i < fields.Length; i++)
+            {
+                fields[i] = (IComparable)fieldsClone[i];
+            }
+        }
+        #endregion
+       }
+}
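
A sketch of consuming the fields array described above after a sorted search. The searcher and query variables and the "title" field are assumed, and the Java-style public member names (scoreDocs, doc, fields) follow the convention used throughout this port.

// Illustrative sketch only: each hit of a sorted search is a FieldDoc whose
// fields[k] holds this document's value for the k-th SortField.
Sort sort = new Sort(new SortField("title", SortField.STRING));
TopFieldDocs top = searcher.Search(query, null, 10, sort);
foreach (ScoreDoc sd in top.scoreDocs)
{
        FieldDoc fd = (FieldDoc) sd;
        System.Console.WriteLine("doc=" + fd.doc + " title=" + fd.fields[0]);
}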
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldDocSortedHitQueue.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldDocSortedHitQueue.cs
new file mode 100644 (file)
index 0000000..b14623f
--- /dev/null
@@ -0,0 +1,253 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using PriorityQueue = Mono.Lucene.Net.Util.PriorityQueue;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Expert: Collects sorted results from Searchables and collates them.
+       /// The elements put into this queue must be of type FieldDoc.
+       /// 
+       /// <p/>Created: Feb 11, 2004 2:04:21 PM
+       /// 
+       /// </summary>
+       /// <since>   lucene 1.4
+       /// </since>
+       /// <version>  $Id: FieldDocSortedHitQueue.java 695514 2008-09-15 15:42:11Z otis $
+       /// </version>
+       class FieldDocSortedHitQueue:PriorityQueue
+       {
+               
+               // this cannot contain AUTO fields - any AUTO fields should
+               // have been resolved by the time this class is used.
+               internal volatile SortField[] fields;
+               
+               // used in the case where the fields are sorted by locale
+               // based strings
+               internal volatile System.Globalization.CompareInfo[] collators;
+               
+               
+               /// <summary> Creates a hit queue sorted by the given list of fields.</summary>
+               /// <param name="fields">Fieldable names, in priority order (highest priority first).
+               /// </param>
+               /// <param name="size"> The number of hits to retain.  Must be greater than zero.
+               /// </param>
+               internal FieldDocSortedHitQueue(SortField[] fields, int size)
+               {
+                       this.fields = fields;
+                       this.collators = HasCollators(fields);
+                       Initialize(size);
+               }
+               
+               
+               /// <summary> Allows redefinition of sort fields if they are <code>null</code>.
+               /// This handles the case, when using ParallelMultiSearcher, where the
+               /// original list contains AUTO and we don't know the actual sort
+               /// type until the values come back.  The fields can only be set once.
+               /// This method is thread safe.
+               /// </summary>
+               /// <param name="fields">
+               /// </param>
+               internal virtual void  SetFields(SortField[] fields)
+               {
+                       lock (this)
+                       {
+                               if (this.fields == null)
+                               {
+                                       this.fields = fields;
+                                       this.collators = HasCollators(fields);
+                               }
+                       }
+               }
+               
+               
+               /// <summary>Returns the fields being used to sort. </summary>
+               internal virtual SortField[] GetFields()
+               {
+                       return fields;
+               }
+               
+               
+               /// <summary>Returns an array of collators, possibly <code>null</code>.  The collators
+               /// correspond to any SortFields which were given a specific locale.
+               /// </summary>
+               /// <param name="fields">Array of sort fields.
+               /// </param>
+               /// <returns> Array, possibly <code>null</code>.
+               /// </returns>
+               private System.Globalization.CompareInfo[] HasCollators(SortField[] fields)
+               {
+                       if (fields == null)
+                               return null;
+                       System.Globalization.CompareInfo[] ret = new System.Globalization.CompareInfo[fields.Length];
+                       for (int i = 0; i < fields.Length; ++i)
+                       {
+                               System.Globalization.CultureInfo locale = fields[i].GetLocale();
+                               if (locale != null)
+                                       ret[i] = locale.CompareInfo;
+                       }
+                       return ret;
+               }
+               
+               
+               /// <summary> Returns whether <code>a</code> is less relevant than <code>b</code>.</summary>
+               /// <param name="a">ScoreDoc
+               /// </param>
+               /// <param name="b">ScoreDoc
+               /// </param>
+               /// <returns> <code>true</code> if document <code>a</code> should be sorted after document <code>b</code>.
+               /// </returns>
+               public override bool LessThan(System.Object a, System.Object b)
+               {
+                       FieldDoc docA = (FieldDoc) a;
+                       FieldDoc docB = (FieldDoc) b;
+                       int n = fields.Length;
+                       int c = 0;
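+                               // Walk the sort fields in priority order (highest first) and stop at the
+                               // first field whose values differ.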
+                       for (int i = 0; i < n && c == 0; ++i)
+                       {
+                               int type = fields[i].GetType();
+                               switch (type)
+                               {
+                                       
+                                       case SortField.SCORE:  {
+                                                       float r1 = (float) ((System.Single) docA.fields[i]);
+                                                       float r2 = (float) ((System.Single) docB.fields[i]);
+                                                       if (r1 > r2)
+                                                               c = - 1;
+                                                       if (r1 < r2)
+                                                               c = 1;
+                                                       break;
+                                               }
+                                       
+                                       case SortField.DOC: 
+                                       case SortField.INT:  {
+                                                       int i1 = ((System.Int32) docA.fields[i]);
+                                                       int i2 = ((System.Int32) docB.fields[i]);
+                                                       if (i1 < i2)
+                                                               c = - 1;
+                                                       if (i1 > i2)
+                                                               c = 1;
+                                                       break;
+                                               }
+                                       
+                                       case SortField.LONG:  {
+                                                       long l1 = (long) ((System.Int64) docA.fields[i]);
+                                                       long l2 = (long) ((System.Int64) docB.fields[i]);
+                                                       if (l1 < l2)
+                                                               c = - 1;
+                                                       if (l1 > l2)
+                                                               c = 1;
+                                                       break;
+                                               }
+                                       
+                                       case SortField.STRING:  {
+                                                       System.String s1 = (System.String) docA.fields[i];
+                                                       System.String s2 = (System.String) docB.fields[i];
+                                                       // null values need to be sorted first, because of how FieldCache.getStringIndex()
+                                                       // works - in that routine, any documents without a value in the given field are
+                                                       // put first.  If both are null, the next SortField is used
+                                                       if (s1 == null)
+                                                               c = (s2 == null) ? 0 : -1;
+                                                       else if (s2 == null)
+                                                               c = 1;
+                                                       else if (fields[i].GetLocale() == null)
+                                                       {
+                                                               c = String.CompareOrdinal(s1, s2);
+                                                       }
+                                                       else
+                                                       {
+                                                               c = collators[i].Compare(s1.ToString(), s2.ToString());
+                                                       }
+                                                       break;
+                                               }
+                                       
+                                       case SortField.FLOAT:  {
+                                                       float f1 = (float) ((System.Single) docA.fields[i]);
+                                                       float f2 = (float) ((System.Single) docB.fields[i]);
+                                                       if (f1 < f2)
+                                                               c = - 1;
+                                                       if (f1 > f2)
+                                                               c = 1;
+                                                       break;
+                                               }
+                                       
+                                       case SortField.DOUBLE:  {
+                                                       double d1 = ((System.Double) docA.fields[i]);
+                                                       double d2 = ((System.Double) docB.fields[i]);
+                                                       if (d1 < d2)
+                                                               c = - 1;
+                                                       if (d1 > d2)
+                                                               c = 1;
+                                                       break;
+                                               }
+                                       
+                                       case SortField.BYTE:  {
+                                                       int i1 = (sbyte) ((System.SByte) docA.fields[i]);
+                                                       int i2 = (sbyte) ((System.SByte) docB.fields[i]);
+                                                       if (i1 < i2)
+                                                               c = - 1;
+                                                       if (i1 > i2)
+                                                               c = 1;
+                                                       break;
+                                               }
+                                       
+                                       case SortField.SHORT:  {
+                                                       int i1 = (short) ((System.Int16) docA.fields[i]);
+                                                       int i2 = (short) ((System.Int16) docB.fields[i]);
+                                                       if (i1 < i2)
+                                                               c = - 1;
+                                                       if (i1 > i2)
+                                                               c = 1;
+                                                       break;
+                                               }
+                                       
+                                       case SortField.CUSTOM:  {
+                                                       c = docA.fields[i].CompareTo(docB.fields[i]);
+                                                       break;
+                                               }
+                                       
+                                       case SortField.AUTO:  {
+                                                       // we cannot handle this - even if we determine the type of object (Float or
+                                                       // Integer), we don't necessarily know how to compare them (both SCORE and
+                                                       // FLOAT contain floats, but are sorted opposite of each other). Before
+                                                       // we get here, each AUTO should have been replaced with its actual value.
+                                                       throw new System.SystemException("FieldDocSortedHitQueue cannot use an AUTO SortField");
+                                               }
+                                       
+                                       default:  {
+                                                       throw new System.SystemException("invalid SortField type: " + type);
+                                               }
+                                       
+                               }
+                               if (fields[i].GetReverse())
+                               {
+                                       c = - c;
+                               }
+                       }
+                       
+                       // avoid random sort order that could lead to duplicates (bug #31241):
+                       if (c == 0)
+                               return docA.doc > docB.doc;
+                       
+                       return c > 0;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldSortedHitQueue.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldSortedHitQueue.cs
new file mode 100644 (file)
index 0000000..d215b64
--- /dev/null
@@ -0,0 +1,718 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using PriorityQueue = Mono.Lucene.Net.Util.PriorityQueue;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Expert: A hit queue for sorting hits by terms in more than one field.
+       /// Uses <code>FieldCache.DEFAULT</code> for maintaining internal term lookup tables.
+       /// 
+       /// <p/>Created: Dec 8, 2003 12:56:03 PM
+       /// 
+       /// </summary>
+       /// <since>   lucene 1.4
+       /// </since>
+       /// <version>  $Id: FieldSortedHitQueue.java 803676 2009-08-12 19:31:38Z hossman $
+       /// </version>
+       /// <seealso cref="Searcher.Search(Query,Filter,int,Sort)">
+       /// </seealso>
+       /// <seealso cref="FieldCache">
+       /// </seealso>
+       /// <deprecated> see {@link FieldValueHitQueue}
+       /// </deprecated>
+    [Obsolete("see FieldValueHitQueue")]
+       public class FieldSortedHitQueue:PriorityQueue
+       {
+               internal class AnonymousClassCache:FieldCacheImpl.Cache
+               {
+                       
+                       protected internal override System.Object CreateValue(IndexReader reader, FieldCacheImpl.Entry entryKey)
+                       {
+                               FieldCacheImpl.Entry entry = (FieldCacheImpl.Entry) entryKey;
+                               System.String fieldname = entry.field;
+                               int type = entry.type;
+                               System.Globalization.CultureInfo locale = entry.locale;
+                               Mono.Lucene.Net.Search.Parser parser = null;
+                               SortComparatorSource factory = null;
+                               if (entry.custom is SortComparatorSource)
+                               {
+                                       factory = (SortComparatorSource) entry.custom;
+                               }
+                               else
+                               {
+                                       parser = (Mono.Lucene.Net.Search.Parser) entry.custom;
+                               }
+                               ScoreDocComparator comparator;
+                               switch (type)
+                               {
+                                       
+                                       case SortField.AUTO: 
+                                               comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.ComparatorAuto(reader, fieldname);
+                                               break;
+                                       
+                                       case SortField.INT: 
+                                               comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorInt(reader, fieldname, (Mono.Lucene.Net.Search.IntParser) parser);
+                                               break;
+                                       
+                                       case SortField.FLOAT: 
+                                               comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorFloat(reader, fieldname, (Mono.Lucene.Net.Search.FloatParser) parser);
+                                               break;
+                                       
+                                       case SortField.LONG: 
+                                               comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorLong(reader, fieldname, (Mono.Lucene.Net.Search.LongParser) parser);
+                                               break;
+                                       
+                                       case SortField.DOUBLE: 
+                                               comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorDouble(reader, fieldname, (Mono.Lucene.Net.Search.DoubleParser) parser);
+                                               break;
+                                       
+                                       case SortField.SHORT: 
+                                               comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorShort(reader, fieldname, (Mono.Lucene.Net.Search.ShortParser) parser);
+                                               break;
+                                       
+                                       case SortField.BYTE: 
+                                               comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorByte(reader, fieldname, (Mono.Lucene.Net.Search.ByteParser) parser);
+                                               break;
+                                       
+                                       case SortField.STRING: 
+                                               if (locale != null)
+                                                       comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorStringLocale(reader, fieldname, locale);
+                                               else
+                                                       comparator = Mono.Lucene.Net.Search.FieldSortedHitQueue.comparatorString(reader, fieldname);
+                                               break;
+                                       
+                                       case SortField.CUSTOM: 
+                                               comparator = factory.NewComparator(reader, fieldname);
+                                               break;
+                                       
+                                       default: 
+                                               throw new System.SystemException("unknown field type: " + type);
+                                       
+                               }
+                               return comparator;
+                       }
+               }
+               private class AnonymousClassScoreDocComparator : ScoreDocComparator
+               {
+                       public AnonymousClassScoreDocComparator(sbyte[] fieldOrder)
+                       {
+                               InitBlock(fieldOrder);
+                       }
+                       private void  InitBlock(sbyte[] fieldOrder)
+                       {
+                               this.fieldOrder = fieldOrder;
+                       }
+                       private sbyte[] fieldOrder;
+                       
+                       public int Compare(ScoreDoc i, ScoreDoc j)
+                       {
+                               int fi = fieldOrder[i.doc];
+                               int fj = fieldOrder[j.doc];
+                               if (fi < fj)
+                                       return - 1;
+                               if (fi > fj)
+                                       return 1;
+                               return 0;
+                       }
+                       
+                       public virtual System.IComparable SortValue(ScoreDoc i)
+                       {
+                               return (sbyte) fieldOrder[i.doc];
+                       }
+                       
+                       public virtual int SortType()
+                       {
+                               return SortField.BYTE;
+                       }
+               }
+               private class AnonymousClassScoreDocComparator1 : ScoreDocComparator
+               {
+                       public AnonymousClassScoreDocComparator1(short[] fieldOrder)
+                       {
+                               InitBlock(fieldOrder);
+                       }
+                       private void  InitBlock(short[] fieldOrder)
+                       {
+                               this.fieldOrder = fieldOrder;
+                       }
+                       private short[] fieldOrder;
+                       
+                       public int Compare(ScoreDoc i, ScoreDoc j)
+                       {
+                               int fi = fieldOrder[i.doc];
+                               int fj = fieldOrder[j.doc];
+                               if (fi < fj)
+                                       return - 1;
+                               if (fi > fj)
+                                       return 1;
+                               return 0;
+                       }
+                       
+                       public virtual System.IComparable SortValue(ScoreDoc i)
+                       {
+                               return (short) fieldOrder[i.doc];
+                       }
+                       
+                       public virtual int SortType()
+                       {
+                               return SortField.SHORT;
+                       }
+               }
+               private class AnonymousClassScoreDocComparator2 : ScoreDocComparator
+               {
+                       public AnonymousClassScoreDocComparator2(int[] fieldOrder)
+                       {
+                               InitBlock(fieldOrder);
+                       }
+                       private void  InitBlock(int[] fieldOrder)
+                       {
+                               this.fieldOrder = fieldOrder;
+                       }
+                       private int[] fieldOrder;
+                       
+                       public int Compare(ScoreDoc i, ScoreDoc j)
+                       {
+                               int fi = fieldOrder[i.doc];
+                               int fj = fieldOrder[j.doc];
+                               if (fi < fj)
+                                       return - 1;
+                               if (fi > fj)
+                                       return 1;
+                               return 0;
+                       }
+                       
+                       public virtual System.IComparable SortValue(ScoreDoc i)
+                       {
+                               return (System.Int32) fieldOrder[i.doc];
+                       }
+                       
+                       public virtual int SortType()
+                       {
+                               return SortField.INT;
+                       }
+               }
+               private class AnonymousClassScoreDocComparator3 : ScoreDocComparator
+               {
+                       public AnonymousClassScoreDocComparator3(long[] fieldOrder)
+                       {
+                               InitBlock(fieldOrder);
+                       }
+                       private void  InitBlock(long[] fieldOrder)
+                       {
+                               this.fieldOrder = fieldOrder;
+                       }
+                       private long[] fieldOrder;
+                       
+                       public int Compare(ScoreDoc i, ScoreDoc j)
+                       {
+                               long li = fieldOrder[i.doc];
+                               long lj = fieldOrder[j.doc];
+                               if (li < lj)
+                                       return - 1;
+                               if (li > lj)
+                                       return 1;
+                               return 0;
+                       }
+                       
+                       public virtual System.IComparable SortValue(ScoreDoc i)
+                       {
+                               return (long) fieldOrder[i.doc];
+                       }
+                       
+                       public virtual int SortType()
+                       {
+                               return SortField.LONG;
+                       }
+               }
+               private class AnonymousClassScoreDocComparator4 : ScoreDocComparator
+               {
+                       public AnonymousClassScoreDocComparator4(float[] fieldOrder)
+                       {
+                               InitBlock(fieldOrder);
+                       }
+                       private void  InitBlock(float[] fieldOrder)
+                       {
+                               this.fieldOrder = fieldOrder;
+                       }
+                       private float[] fieldOrder;
+                       
+                       public int Compare(ScoreDoc i, ScoreDoc j)
+                       {
+                               float fi = fieldOrder[i.doc];
+                               float fj = fieldOrder[j.doc];
+                               if (fi < fj)
+                                       return - 1;
+                               if (fi > fj)
+                                       return 1;
+                               return 0;
+                       }
+                       
+                       public virtual System.IComparable SortValue(ScoreDoc i)
+                       {
+                               return (float) fieldOrder[i.doc];
+                       }
+                       
+                       public virtual int SortType()
+                       {
+                               return SortField.FLOAT;
+                       }
+               }
+               private class AnonymousClassScoreDocComparator5 : ScoreDocComparator
+               {
+                       public AnonymousClassScoreDocComparator5(double[] fieldOrder)
+                       {
+                               InitBlock(fieldOrder);
+                       }
+                       private void  InitBlock(double[] fieldOrder)
+                       {
+                               this.fieldOrder = fieldOrder;
+                       }
+                       private double[] fieldOrder;
+                       
+                       public int Compare(ScoreDoc i, ScoreDoc j)
+                       {
+                               double di = fieldOrder[i.doc];
+                               double dj = fieldOrder[j.doc];
+                               if (di < dj)
+                                       return - 1;
+                               if (di > dj)
+                                       return 1;
+                               return 0;
+                       }
+                       
+                       public virtual System.IComparable SortValue(ScoreDoc i)
+                       {
+                               return (double) fieldOrder[i.doc];
+                       }
+                       
+                       public virtual int SortType()
+                       {
+                               return SortField.DOUBLE;
+                       }
+               }
+               private class AnonymousClassScoreDocComparator6 : ScoreDocComparator
+               {
+                       public AnonymousClassScoreDocComparator6(Mono.Lucene.Net.Search.StringIndex index)
+                       {
+                               InitBlock(index);
+                       }
+                       private void  InitBlock(Mono.Lucene.Net.Search.StringIndex index)
+                       {
+                               this.index = index;
+                       }
+                       private Mono.Lucene.Net.Search.StringIndex index;
+                       
+                       public int Compare(ScoreDoc i, ScoreDoc j)
+                       {
+                               int fi = index.order[i.doc];
+                               int fj = index.order[j.doc];
+                               if (fi < fj)
+                                       return - 1;
+                               if (fi > fj)
+                                       return 1;
+                               return 0;
+                       }
+                       
+                       public virtual System.IComparable SortValue(ScoreDoc i)
+                       {
+                               return index.lookup[index.order[i.doc]];
+                       }
+                       
+                       public virtual int SortType()
+                       {
+                               return SortField.STRING;
+                       }
+               }
+               private class AnonymousClassScoreDocComparator7 : ScoreDocComparator
+               {
+                       public AnonymousClassScoreDocComparator7(System.String[] index, System.Globalization.CompareInfo collator)
+                       {
+                               InitBlock(index, collator);
+                       }
+                       private void  InitBlock(System.String[] index, System.Globalization.CompareInfo collator)
+                       {
+                               this.index = index;
+                               this.collator = collator;
+                       }
+                       private System.String[] index;
+                       private System.Globalization.CompareInfo collator;
+                       
+                       public int Compare(ScoreDoc i, ScoreDoc j)
+                       {
+                               System.String si = index[i.doc];
+                               System.String sj = index[j.doc];
+                               if ((System.Object) si == (System.Object) sj)
+                               {
+                                       return 0;
+                               }
+                               else if (si == null)
+                               {
+                                       return - 1;
+                               }
+                               else if (sj == null)
+                               {
+                                       return 1;
+                               }
+                               else
+                               {
+                                       return collator.Compare(si, sj);
+                               }
+                       }
+                       
+                       public virtual System.IComparable SortValue(ScoreDoc i)
+                       {
+                               return index[i.doc];
+                       }
+                       
+                       public virtual int SortType()
+                       {
+                               return SortField.STRING;
+                       }
+               }
+               
+               /// <summary> Creates a hit queue sorted by the given list of fields.</summary>
+               /// <param name="reader"> Index to use.
+               /// </param>
+               /// <param name="fields">SortField array we are sorting by, in priority order (highest priority first).  Cannot be <code>null</code> or empty.
+               /// </param>
+               /// <param name="size"> The number of hits to retain.  Must be greater than zero.
+               /// </param>
+               /// <throws>  IOException </throws>
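+               /// <example>
+               /// A minimal usage sketch (illustrative only, not part of the original docs;
+               /// assumes an open <c>reader</c> and <c>docId</c>/<c>score</c> values produced
+               /// by a collector):
+               /// <code>
+               /// SortField[] sort = new SortField[] { new SortField("date", SortField.STRING) };
+               /// FieldSortedHitQueue queue = new FieldSortedHitQueue(reader, sort, 10);
+               /// queue.Insert(new FieldDoc(docId, score)); // retains the top 10 by sort order
+               /// FieldDoc top = (FieldDoc) queue.Top();    // least competitive retained hit
+               /// </code>
+               /// </example>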
+               public FieldSortedHitQueue(IndexReader reader, SortField[] fields, int size)
+               {
+                       int n = fields.Length;
+                       comparators = new ScoreDocComparator[n];
+                       this.fields = new SortField[n];
+                       for (int i = 0; i < n; ++i)
+                       {
+                               System.String fieldname = fields[i].GetField();
+                               comparators[i] = GetCachedComparator(reader, fieldname, fields[i].GetType(), fields[i].GetParser(), fields[i].GetLocale(), fields[i].GetFactory());
+                               // new SortField instances must only be created when auto-detection is in use
+                               if (fields[i].GetType() == SortField.AUTO)
+                               {
+                                       if (comparators[i].SortType() == SortField.STRING)
+                                       {
+                                               this.fields[i] = new SortField(fieldname, fields[i].GetLocale(), fields[i].GetReverse());
+                                       }
+                                       else
+                                       {
+                                               this.fields[i] = new SortField(fieldname, comparators[i].SortType(), fields[i].GetReverse());
+                                       }
+                               }
+                               else
+                               {
+                                       System.Diagnostics.Debug.Assert(comparators [i].SortType() == fields [i].GetType());
+                                       this.fields[i] = fields[i];
+                               }
+                       }
+                       Initialize(size);
+               }
+               
+               
+               /// <summary>Stores a comparator corresponding to each field being sorted by </summary>
+               protected internal ScoreDocComparator[] comparators;
+               
+               /// <summary>Stores the sort criteria being used. </summary>
+               protected internal SortField[] fields;
+               
+               /// <summary>Stores the maximum score value encountered, needed for normalizing. </summary>
+               protected internal float maxscore = System.Single.NegativeInfinity;
+               
+               /// <summary>Returns the maximum score encountered by elements inserted via Insert().</summary>
+               public virtual float GetMaxScore()
+               {
+                       return maxscore;
+               }
+               
+               // Update maxscore.
+               private void  UpdateMaxScore(FieldDoc fdoc)
+               {
+                       maxscore = System.Math.Max(maxscore, fdoc.score);
+               }
+               
+               // The signature of this method takes a FieldDoc in order to avoid
+               // the unneeded cast to retrieve the score.
+               // inherit javadoc
+               public virtual bool Insert(FieldDoc fdoc)
+               {
+                       UpdateMaxScore(fdoc);
+                       return base.Insert(fdoc);
+               }
+               
+               // This overrides PriorityQueue.insert() so that insert(FieldDoc) that
+               // keeps track of the score isn't accidentally bypassed.  
+               // inherit javadoc
+        [Obsolete("Mono.Lucene.Net-2.9.1. This method overrides obsolete member Mono.Lucene.Net.Util.PriorityQueue.Insert(object)")]
+               public override bool Insert(System.Object fdoc)
+               {
+                       return Insert((FieldDoc) fdoc);
+               }
+               
+               // This overrides PriorityQueue.insertWithOverflow() so that
+               // updateMaxScore(FieldDoc) that keeps track of the score isn't accidentally
+               // bypassed.
+               public override System.Object InsertWithOverflow(System.Object element)
+               {
+                       UpdateMaxScore((FieldDoc) element);
+                       return base.InsertWithOverflow(element);
+               }
+               
+               /// <summary> Returns whether <code>a</code> is less relevant than <code>b</code>.</summary>
+               /// <param name="a">ScoreDoc
+               /// </param>
+               /// <param name="b">ScoreDoc
+               /// </param>
+               /// <returns> <code>true</code> if document <code>a</code> should be sorted after document <code>b</code>.
+               /// </returns>
+               public override bool LessThan(System.Object a, System.Object b)
+               {
+                       ScoreDoc docA = (ScoreDoc) a;
+                       ScoreDoc docB = (ScoreDoc) b;
+                       
+                       // run comparators
+                       int n = comparators.Length;
+                       int c = 0;
+                       for (int i = 0; i < n && c == 0; ++i)
+                       {
+                               c = (fields[i].reverse)?comparators[i].Compare(docB, docA):comparators[i].Compare(docA, docB);
+                       }
+                       // avoid random sort order that could lead to duplicates (bug #31241):
+                       if (c == 0)
+                               return docA.doc > docB.doc;
+                       return c > 0;
+               }
+               
+               
+               /// <summary> Given a FieldDoc object, stores the values used
+               /// to sort the given document.  These values are not the raw
+               /// values out of the index, but the internal representation
+               /// of them.  This is so the given search hit can be collated
+               /// by a MultiSearcher with other search hits.
+               /// </summary>
+               /// <param name="doc"> The FieldDoc to store sort values into.
+               /// </param>
+               /// <returns>  The same FieldDoc passed in.
+               /// </returns>
+               /// <seealso cref="Searchable.Search(Weight,Filter,int,Sort)">
+               /// </seealso>
+               internal virtual FieldDoc FillFields(FieldDoc doc)
+               {
+                       int n = comparators.Length;
+                       System.IComparable[] fields = new System.IComparable[n];
+                       for (int i = 0; i < n; ++i)
+                               fields[i] = comparators[i].SortValue(doc);
+                       doc.fields = fields;
+                       //if (maxscore > 1.0f) doc.score /= maxscore;   // normalize scores
+                       return doc;
+               }
+               
+               
+               /// <summary>Returns the SortFields being used by this hit queue. </summary>
+               internal virtual SortField[] GetFields()
+               {
+                       return fields;
+               }
+               
+               internal static ScoreDocComparator GetCachedComparator(IndexReader reader, System.String field, int type, Mono.Lucene.Net.Search.Parser parser, System.Globalization.CultureInfo locale, SortComparatorSource factory)
+               {
+                       if (type == SortField.DOC)
+                               return Mono.Lucene.Net.Search.ScoreDocComparator_Fields.INDEXORDER;
+                       if (type == SortField.SCORE)
+                               return Mono.Lucene.Net.Search.ScoreDocComparator_Fields.RELEVANCE;
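+                       // Cache key precedence: a custom comparator factory, if supplied,
+                       // identifies the entry; otherwise a parser; otherwise the locale
+                       // used for string comparison.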
+                       FieldCacheImpl.Entry entry = (factory != null)?new FieldCacheImpl.Entry(field, factory):((parser != null)?new FieldCacheImpl.Entry(field, type, parser):new FieldCacheImpl.Entry(field, type, locale));
+                       return (ScoreDocComparator) Comparators.Get(reader, entry);
+               }
+               
+               /// <summary>Internal cache of comparators. Similar to FieldCache, only
+               /// caches comparators instead of term values. 
+               /// </summary>
+               internal static readonly FieldCacheImpl.Cache Comparators;
+               
+               /// <summary> Returns a comparator for sorting hits according to a field containing bytes.</summary>
+               /// <param name="reader"> Index to use.
+               /// </param>
+               /// <param name="fieldname"> Fieldable containing byte values.
+               /// </param>
+               /// <returns>  Comparator for sorting hits.
+               /// </returns>
+               /// <throws>  IOException If an error occurs reading the index. </throws>
+               internal static ScoreDocComparator comparatorByte(IndexReader reader, System.String fieldname, Mono.Lucene.Net.Search.ByteParser parser)
+               {
+                       System.String field = String.Intern(fieldname);
+                       sbyte[] fieldOrder = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetBytes(reader, field, parser);
+                       return new AnonymousClassScoreDocComparator(fieldOrder);
+               }
+               
+               /// <summary> Returns a comparator for sorting hits according to a field containing shorts.</summary>
+               /// <param name="reader"> Index to use.
+               /// </param>
+               /// <param name="fieldname"> Fieldable containing short values.
+               /// </param>
+               /// <returns>  Comparator for sorting hits.
+               /// </returns>
+               /// <throws>  IOException If an error occurs reading the index. </throws>
+               internal static ScoreDocComparator comparatorShort(IndexReader reader, System.String fieldname, Mono.Lucene.Net.Search.ShortParser parser)
+               {
+                       System.String field = String.Intern(fieldname);
+                       short[] fieldOrder = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetShorts(reader, field, parser);
+                       return new AnonymousClassScoreDocComparator1(fieldOrder);
+               }
+               
+               /// <summary> Returns a comparator for sorting hits according to a field containing integers.</summary>
+               /// <param name="reader"> Index to use.
+               /// </param>
+               /// <param name="fieldname"> Fieldable containing integer values.
+               /// </param>
+               /// <returns>  Comparator for sorting hits.
+               /// </returns>
+               /// <throws>  IOException If an error occurs reading the index. </throws>
+               internal static ScoreDocComparator comparatorInt(IndexReader reader, System.String fieldname, Mono.Lucene.Net.Search.IntParser parser)
+               {
+                       System.String field = String.Intern(fieldname);
+                       int[] fieldOrder = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetInts(reader, field, parser);
+                       return new AnonymousClassScoreDocComparator2(fieldOrder);
+               }
+               
+               /// <summary> Returns a comparator for sorting hits according to a field containing longs.</summary>
+               /// <param name="reader"> Index to use.
+               /// </param>
+               /// <param name="fieldname"> Fieldable containing long values.
+               /// </param>
+               /// <returns>  Comparator for sorting hits.
+               /// </returns>
+               /// <throws>  IOException If an error occurs reading the index. </throws>
+               internal static ScoreDocComparator comparatorLong(IndexReader reader, System.String fieldname, Mono.Lucene.Net.Search.LongParser parser)
+               {
+                       System.String field = String.Intern(fieldname);
+                       long[] fieldOrder = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetLongs(reader, field, parser);
+                       return new AnonymousClassScoreDocComparator3(fieldOrder);
+               }
+               
+               
+               /// <summary> Returns a comparator for sorting hits according to a field containing floats.</summary>
+               /// <param name="reader"> Index to use.
+               /// </param>
+               /// <param name="fieldname"> Fieldable containing float values.
+               /// </param>
+               /// <returns>  Comparator for sorting hits.
+               /// </returns>
+               /// <throws>  IOException If an error occurs reading the index. </throws>
+               internal static ScoreDocComparator comparatorFloat(IndexReader reader, System.String fieldname, Mono.Lucene.Net.Search.FloatParser parser)
+               {
+                       System.String field = String.Intern(fieldname);
+                       float[] fieldOrder = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetFloats(reader, field, parser);
+                       return new AnonymousClassScoreDocComparator4(fieldOrder);
+               }
+               
+               /// <summary> Returns a comparator for sorting hits according to a field containing doubles.</summary>
+               /// <param name="reader"> Index to use.
+               /// </param>
+               /// <param name="fieldname"> Fieldable containing double values.
+               /// </param>
+               /// <returns>  Comparator for sorting hits.
+               /// </returns>
+               /// <throws>  IOException If an error occurs reading the index. </throws>
+               internal static ScoreDocComparator comparatorDouble(IndexReader reader, System.String fieldname, Mono.Lucene.Net.Search.DoubleParser parser)
+               {
+                       System.String field = String.Intern(fieldname);
+                       double[] fieldOrder = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetDoubles(reader, field, parser);
+                       return new AnonymousClassScoreDocComparator5(fieldOrder);
+               }
+               
+               /// <summary> Returns a comparator for sorting hits according to a field containing strings.</summary>
+               /// <param name="reader"> Index to use.
+               /// </param>
+               /// <param name="fieldname"> Fieldable containing string values.
+               /// </param>
+               /// <returns>  Comparator for sorting hits.
+               /// </returns>
+               /// <throws>  IOException If an error occurs reading the index. </throws>
+               internal static ScoreDocComparator comparatorString(IndexReader reader, System.String fieldname)
+               {
+                       System.String field = String.Intern(fieldname);
+                       Mono.Lucene.Net.Search.StringIndex index = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetStringIndex(reader, field);
+                       return new AnonymousClassScoreDocComparator6(index);
+               }
+               
+               /// <summary> Returns a comparator for sorting hits according to a field containing strings.</summary>
+               /// <param name="reader"> Index to use.
+               /// </param>
+               /// <param name="fieldname"> Fieldable containing string values.
+               /// </param>
+               /// <returns>  Comparator for sorting hits.
+               /// </returns>
+               /// <throws>  IOException If an error occurs reading the index. </throws>
+               internal static ScoreDocComparator comparatorStringLocale(IndexReader reader, System.String fieldname, System.Globalization.CultureInfo locale)
+               {
+                       System.Globalization.CompareInfo collator = locale.CompareInfo;
+                       System.String field = String.Intern(fieldname);
+                       System.String[] index = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetStrings(reader, field);
+                       return new AnonymousClassScoreDocComparator7(index, collator);
+               }
+               
+               /// <summary> Returns a comparator for sorting hits according to values in the given field.
+               /// The terms in the field are looked at to determine whether they contain integers,
+               /// floats or strings.  Once the type is determined, one of the other static methods
+               /// in this class is called to get the comparator.
+               /// </summary>
+               /// <param name="reader"> Index to use.
+               /// </param>
+               /// <param name="fieldname"> Fieldable containing values.
+               /// </param>
+               /// <returns>  Comparator for sorting hits.
+               /// </returns>
+               /// <throws>  IOException If an error occurs reading the index. </throws>
+               internal static ScoreDocComparator ComparatorAuto(IndexReader reader, System.String fieldname)
+               {
+                       System.String field = String.Intern(fieldname);
+                       System.Object lookupArray = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetAuto(reader, field);
+                       if (lookupArray is Mono.Lucene.Net.Search.StringIndex)
+                       {
+                               return comparatorString(reader, field);
+                       }
+                       else if (lookupArray is int[])
+                       {
+                               return comparatorInt(reader, field, null);
+                       }
+                       else if (lookupArray is long[])
+                       {
+                               return comparatorLong(reader, field, null);
+                       }
+                       else if (lookupArray is float[])
+                       {
+                               return comparatorFloat(reader, field, null);
+                       }
+                       else if (lookupArray is System.String[])
+                       {
+                               return comparatorString(reader, field);
+                       }
+                       else
+                       {
+                               throw new System.SystemException("unknown data type in field '" + field + "'");
+                       }
+               }
+               static FieldSortedHitQueue()
+               {
+                       Comparators = new AnonymousClassCache();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldValueHitQueue.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FieldValueHitQueue.cs
new file mode 100644 (file)
index 0000000..bee0cd4
--- /dev/null
@@ -0,0 +1,263 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using PriorityQueue = Mono.Lucene.Net.Util.PriorityQueue;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Expert: A hit queue for sorting hits by terms in more than one field.
+       /// Uses <code>FieldCache.DEFAULT</code> for maintaining
+       /// internal term lookup tables.
+       /// 
+       /// This class will not resolve SortField.AUTO types, and expects the type
+       /// of all SortFields used for construction to already have been resolved. 
+       /// {@link SortField#DetectFieldType(IndexReader, String)} is a utility method which
+       /// may be used for field type detection.
+       /// 
+       /// <b>NOTE:</b> This API is experimental and might change in
+       /// incompatible ways in the next release.
+       /// 
+       /// </summary>
+       /// <since> 2.9
+       /// </since>
+       /// <version>  $Id:
+       /// </version>
+       /// <seealso cref="Searcher.Search(Query,Filter,int,Sort)">
+       /// </seealso>
+       /// <seealso cref="FieldCache">
+       /// </seealso>
+       public abstract class FieldValueHitQueue:PriorityQueue
+       {
+               
+               internal sealed class Entry
+               {
+                       internal int slot;
+                       internal int docID;
+                       internal float score;
+                       
+                       internal Entry(int slot, int docID, float score)
+                       {
+                               this.slot = slot;
+                               this.docID = docID;
+                               this.score = score;
+                       }
+                       
+                       public override System.String ToString()
+                       {
+                               return "slot:" + slot + " docID:" + docID + " score=" + score;
+                       }
+               }
+               
+               /// <summary> An implementation of {@link FieldValueHitQueue} which is optimized in case
+               /// there is just one comparator.
+               /// </summary>
+               private sealed class OneComparatorFieldValueHitQueue:FieldValueHitQueue
+               {
+                       
+                       private FieldComparator comparator;
+                       private int oneReverseMul;
+                       
+                       public OneComparatorFieldValueHitQueue(SortField[] fields, int size):base(fields)
+                       {
+                               if (fields.Length == 0)
+                               {
+                                       throw new System.ArgumentException("Sort must contain at least one field");
+                               }
+                               
+                               SortField field = fields[0];
+                               // AUTO is resolved before we are called
+                               System.Diagnostics.Debug.Assert(field.GetType() != SortField.AUTO);
+                               comparator = field.GetComparator(size, 0);
+                               oneReverseMul = field.reverse?- 1:1;
+                               
+                               comparators[0] = comparator;
+                               reverseMul[0] = oneReverseMul;
+                               
+                               Initialize(size);
+                       }
+                       
+                       /// <summary> Returns whether <code>a</code> is less relevant than <code>b</code>.</summary>
+                       /// <param name="a">ScoreDoc
+                       /// </param>
+                       /// <param name="b">ScoreDoc
+                       /// </param>
+                       /// <returns> <code>true</code> if document <code>a</code> should be sorted after document <code>b</code>.
+                       /// </returns>
+                       public override bool LessThan(System.Object a, System.Object b)
+                       {
+                               Entry hitA = (Entry) a;
+                               Entry hitB = (Entry) b;
+                               
+                               System.Diagnostics.Debug.Assert(hitA != hitB);
+                               System.Diagnostics.Debug.Assert(hitA.slot != hitB.slot);
+                               
+                               int c = oneReverseMul * comparator.Compare(hitA.slot, hitB.slot);
+                               if (c != 0)
+                               {
+                                       return c > 0;
+                               }
+                               
+                               // avoid random sort order that could lead to duplicates (bug #31241):
+                               return hitA.docID > hitB.docID;
+                       }
+               }
+               
+               /// <summary> An implementation of {@link FieldValueHitQueue} which is optimized in case
+               /// there is more than one comparator.
+               /// </summary>
+               private sealed class MultiComparatorsFieldValueHitQueue:FieldValueHitQueue
+               {
+                       
+                       public MultiComparatorsFieldValueHitQueue(SortField[] fields, int size):base(fields)
+                       {
+                               
+                               int numComparators = comparators.Length;
+                               for (int i = 0; i < numComparators; ++i)
+                               {
+                                       SortField field = fields[i];
+                                       
+                                       // AUTO is resolved before we are called
+                                       System.Diagnostics.Debug.Assert(field.GetType() != SortField.AUTO);
+                                       
+                                       reverseMul[i] = field.reverse?- 1:1;
+                                       comparators[i] = field.GetComparator(size, i);
+                               }
+                               
+                               Initialize(size);
+                       }
+                       
+                       public override bool LessThan(System.Object a, System.Object b)
+                       {
+                               Entry hitA = (Entry) a;
+                               Entry hitB = (Entry) b;
+                               
+                               System.Diagnostics.Debug.Assert(hitA != hitB);
+                               System.Diagnostics.Debug.Assert(hitA.slot != hitB.slot);
+                               
+                               int numComparators = comparators.Length;
+                               for (int i = 0; i < numComparators; ++i)
+                               {
+                                       int c = reverseMul[i] * comparators[i].Compare(hitA.slot, hitB.slot);
+                                       if (c != 0)
+                                       {
+                                               // Short circuit
+                                               return c > 0;
+                                       }
+                               }
+                               
+                               // avoid random sort order that could lead to duplicates (bug #31241):
+                               return hitA.docID > hitB.docID;
+                       }
+               }
+               
+               // prevent instantiation and extension.
+               private FieldValueHitQueue(SortField[] fields)
+               {
+                       // When we get here, fields.length is guaranteed to be > 0, therefore no
+                       // need to check it again.
+                       
+                       // All these are required by this class's API - need to return arrays.
+                       // Therefore even in the case of a single comparator, create an array
+                       // anyway.
+                       this.fields = fields;
+                       int numComparators = fields.Length;
+                       comparators = new FieldComparator[numComparators];
+                       reverseMul = new int[numComparators];
+               }
+               
+               /// <summary> Creates a hit queue sorted by the given list of fields.
+               /// 
+               /// <p/><b>NOTE</b>: The instances returned by this method
+               /// pre-allocate a full array of length <code>size</code>.
+               /// 
+               /// </summary>
+               /// <param name="fields">SortField array we are sorting by in priority order (highest
+               /// priority first); cannot be <code>null</code> or empty
+               /// </param>
+               /// <param name="size">The number of hits to retain. Must be greater than zero.
+               /// </param>
+               /// <throws>  IOException </throws>
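+               /// <example>
+               /// A hedged sketch of typical use (the field names are placeholders; any
+               /// SortField.AUTO types must already be resolved before calling this factory):
+               /// <code>
+               /// SortField[] sort = new SortField[] {
+               ///     new SortField("price", SortField.FLOAT),
+               ///     new SortField("title", SortField.STRING)
+               /// };
+               /// // One field selects the single-comparator queue; several fields the multi one.
+               /// FieldValueHitQueue queue = FieldValueHitQueue.Create(sort, 20);
+               /// </code>
+               /// </example>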
+               public static FieldValueHitQueue Create(SortField[] fields, int size)
+               {
+                       
+                       if (fields.Length == 0)
+                       {
+                               throw new System.ArgumentException("Sort must contain at least one field");
+                       }
+                       
+                       if (fields.Length == 1)
+                       {
+                               return new OneComparatorFieldValueHitQueue(fields, size);
+                       }
+                       else
+                       {
+                               return new MultiComparatorsFieldValueHitQueue(fields, size);
+                       }
+               }
+               
+               internal virtual FieldComparator[] GetComparators()
+               {
+                       return comparators;
+               }
+               
+               internal virtual int[] GetReverseMul()
+               {
+                       return reverseMul;
+               }
+               
+               /// <summary>Stores the sort criteria being used. </summary>
+               protected internal SortField[] fields;
+               protected internal FieldComparator[] comparators;
+               protected internal int[] reverseMul;
+               
+               public abstract override bool LessThan(System.Object a, System.Object b);
+               
+               /// <summary> Given a queue Entry, creates a corresponding FieldDoc
+               /// that contains the values used to sort the given document.
+               /// These values are not the raw values out of the index, but the internal
+               /// representation of them. This is so the given search hit can be collated by
+               /// a MultiSearcher with other search hits.
+               /// 
+               /// </summary>
+               /// <param name="entry">The Entry used to create a FieldDoc
+               /// </param>
+               /// <returns> The newly created FieldDoc
+               /// </returns>
+               /// <seealso cref="Searchable.Search(Weight,Filter,int,Sort)">
+               /// </seealso>
+               internal virtual FieldDoc FillFields(Entry entry)
+               {
+                       int n = comparators.Length;
+                       System.IComparable[] fields = new System.IComparable[n];
+                       for (int i = 0; i < n; ++i)
+                       {
+                               fields[i] = comparators[i].Value(entry.slot);
+                       }
+                       //if (maxscore > 1.0f) doc.score /= maxscore;   // normalize scores
+                       return new FieldDoc(entry.docID, entry.score, fields);
+               }
+               
+               /// <summary>Returns the SortFields being used by this hit queue. </summary>
+               internal virtual SortField[] GetFields()
+               {
+                       return fields;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Filter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Filter.cs
new file mode 100644 (file)
index 0000000..1d01125
--- /dev/null
@@ -0,0 +1,80 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using DocIdBitSet = Mono.Lucene.Net.Util.DocIdBitSet;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Abstract base class for restricting which documents may be returned during searching.
+       /// <p/>
+       /// <b>Note:</b> In Lucene 3.0 {@link #Bits(IndexReader)} will be removed
+       /// and {@link #GetDocIdSet(IndexReader)} will be defined as abstract.
+       /// All implementing classes must therefore implement {@link #GetDocIdSet(IndexReader)}
+       /// in order to work with Lucene 3.0.
+       /// </summary>
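+       /// <example>
+       /// A minimal sketch of a concrete filter (illustrative only; a real filter would
+       /// normally derive its bits from index terms instead of accepting every document):
+       /// <code>
+       /// [Serializable]
+       /// public class AcceptAllFilter : Filter
+       /// {
+       ///     public override DocIdSet GetDocIdSet(IndexReader reader)
+       ///     {
+       ///         // one bit per document in the segment, all set
+       ///         return new DocIdBitSet(new System.Collections.BitArray(reader.MaxDoc(), true));
+       ///     }
+       /// }
+       /// </code>
+       /// </example>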
+       [Serializable]
+       public abstract class Filter
+       { 
+        
+
+        /// <summary><b>NOTE:</b> See {@link #GetDocIdSet(IndexReader)} for
+        /// handling of multi-segment indexes (which applies to
+        /// this method as well).
+        /// </summary>
+               /// <returns> A BitSet with true for documents which should be permitted in
+               /// search results, and false for those that should not.
+               /// </returns>
+               /// <deprecated> Use {@link #GetDocIdSet(IndexReader)} instead.
+               /// </deprecated>
+        [Obsolete("Use GetDocIdSet(IndexReader) instead.")]
+               public virtual System.Collections.BitArray Bits(IndexReader reader)
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+        /// <summary>
+        /// Creates a {@link DocIdSet} enumerating the documents that should be
+        /// permitted in search results. <b>NOTE:</b> null can be
+        /// returned if no documents are accepted by this Filter.
+        /// <p/>
+        /// Note: This method will be called once per segment in
+        /// the index during searching.  The returned {@link DocIdSet}
+        /// must refer to document IDs for that segment, not for
+        /// the top-level reader.
+        /// </summary>
+        /// <param name="reader">An {@link IndexReader} instance opened on the index currently
+        /// searched on. Note, it is likely that the provided reader does not
+        /// represent the whole underlying index, i.e. if the index has more than
+        /// one segment the given reader only represents a single segment.
+        /// </param>
+               /// <returns> a DocIdSet that provides the documents which should be permitted or
+               /// prohibited in search results. <b>NOTE:</b> null can be returned if
+               /// no documents will be accepted by this Filter.
+               /// </returns>
+               /// <seealso cref="DocIdBitSet">
+               /// </seealso>
+               public virtual DocIdSet GetDocIdSet(IndexReader reader)
+               {
+                       return new DocIdBitSet(Bits(reader));
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FilterManager.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FilterManager.cs
new file mode 100644 (file)
index 0000000..c97911e
--- /dev/null
@@ -0,0 +1,232 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using System.Collections.Generic;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Filter caching singleton.  It can be used 
+       /// to save filters locally for reuse.
+       /// This class makes it possible to cache Filters even when using RMI, as it
+       /// keeps the cache on the searcher side of the RMI connection.
+       /// 
+       /// Also could be used as a persistent storage for any filter as long as the
+       /// filter provides a proper hashCode(), as that is used as the key in the cache.
+       /// 
+       /// The cache is periodically cleaned up from a separate thread to ensure the
+       /// cache doesn't exceed the maximum size.
+       /// </summary>
+       public class FilterManager
+       {
+               
+               protected internal static FilterManager manager;
+               
+               /// <summary>The default maximum number of Filters in the cache </summary>
+               protected internal const int DEFAULT_CACHE_CLEAN_SIZE = 100;
+               /// <summary>The default frequency of cache cleanup </summary>
+               protected internal const long DEFAULT_CACHE_SLEEP_TIME = 1000 * 60 * 10;
+               
+               /// <summary>The cache itself </summary>
+               protected internal System.Collections.IDictionary cache;
+               /// <summary>Maximum allowed cache size </summary>
+               protected internal int cacheCleanSize;
+               /// <summary>Cache cleaning frequency </summary>
+               protected internal long cleanSleepTime;
+               /// <summary>Cache cleaner that runs in a separate thread </summary>
+               protected internal FilterCleaner filterCleaner;
+               
+               public static FilterManager GetInstance()
+               {
+                       lock (typeof(Mono.Lucene.Net.Search.FilterManager))
+                       {
+                               if (manager == null)
+                               {
+                                       manager = new FilterManager();
+                               }
+                               return manager;
+                       }
+               }
+               
+               /// <summary> Sets up the FilterManager singleton.</summary>
+               protected internal FilterManager()
+               {
+                       cache = new System.Collections.Hashtable();
+                       cacheCleanSize = DEFAULT_CACHE_CLEAN_SIZE; // Let the cache get to 100 items
+                       cleanSleepTime = DEFAULT_CACHE_SLEEP_TIME; // 10 minutes between cleanings
+                       
+                       filterCleaner = new FilterCleaner(this);
+                       SupportClass.ThreadClass fcThread = new SupportClass.ThreadClass(new System.Threading.ThreadStart(filterCleaner.Run));
+                       // set to be a daemon thread so it doesn't have to be stopped
+                       fcThread.IsBackground = true;
+                       fcThread.Start();
+               }
+               
+               /// <summary> Sets the max size that cache should reach before it is cleaned up</summary>
+               /// <param name="cacheCleanSize">maximum allowed cache size
+               /// </param>
+               public virtual void  SetCacheSize(int cacheCleanSize)
+               {
+                       this.cacheCleanSize = cacheCleanSize;
+               }
+               
+               /// <summary> Sets the cache cleaning frequency in milliseconds.</summary>
+               /// <param name="cleanSleepTime">cleaning frequency in milliseconds
+               /// </param>
+               public virtual void  SetCleanThreadSleepTime(long cleanSleepTime)
+               {
+                       this.cleanSleepTime = cleanSleepTime;
+               }
+               
+               /// <summary> Returns the cached version of the filter.  Allows the caller to pass up
+               /// a small filter but this will keep a persistent version around and allow
+               /// the caching filter to do its job.
+               /// 
+               /// </summary>
+               /// <param name="filter">The input filter
+               /// </param>
+               /// <returns> The cached version of the filter
+               /// </returns>
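+               /// <example>
+               /// A short usage sketch (<c>query</c> is a placeholder; any Filter with a
+               /// stable <c>GetHashCode()</c> works as the candidate):
+               /// <code>
+               /// Filter candidate = new QueryWrapperFilter(query);
+               /// Filter cached = FilterManager.GetInstance().GetFilter(candidate);
+               /// </code>
+               /// </example>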
+               public virtual Filter GetFilter(Filter filter)
+               {
+                       lock (cache.SyncRoot)
+                       {
+                               FilterItem fi = null;
+                               fi = (FilterItem) cache[(System.Int32) filter.GetHashCode()];
+                               if (fi != null)
+                               {
+                                       fi.timestamp = System.DateTime.Now.Ticks;
+                                       return fi.filter;
+                               }
+                               cache[(System.Int32) filter.GetHashCode()] = new FilterItem(this, filter);
+                               return filter;
+                       }
+               }
+               
+               /// <summary> Holds the filter and the last time the filter was used, to make LRU-based
+               /// cache cleaning possible.
+               /// TODO: Clean this up when we switch to Java 1.5
+               /// </summary>
+               protected internal class FilterItem
+               {
+                       private void  InitBlock(FilterManager enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private FilterManager enclosingInstance;
+                       public FilterManager Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       public Filter filter;
+                       public long timestamp;
+                       
+                       public FilterItem(FilterManager enclosingInstance, Filter filter)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.filter = filter;
+                               this.timestamp = System.DateTime.Now.Ticks;
+                       }
+               }
+               
+               
+               /// <summary> Keeps the cache from getting too big.
+               /// If we were using Java 1.5, we could use LinkedHashMap and we would not need this thread
+               /// to clean out the cache.
+               /// 
+               /// In this port a temporary list of FilterItems is sorted by timestamp with
+               /// FilterItemComparer, so when it's time to clean up the oldest items come first.
+               /// 
+               /// Removes 1.5 times the number of items over the clean size, to make the cache smaller.
+               /// For example:
+               /// If the cache clean size is 10 and the cache is at 15, we would remove (15 - 10) * 1.5 = 7.5,
+               /// which the integer cast truncates to 7.
+               /// This way we clean the cache a bit more than strictly needed and avoid running the
+               /// cleaner too frequently.
+               /// </summary>
+               protected internal class FilterCleaner : IThreadRunnable
+               {
+            private class FilterItemComparer : IComparer<FilterItem>
+            {
+                #region IComparer<FilterItem> Members
+
+                public int Compare(FilterItem x, FilterItem y)
+                {
+                    return x.timestamp.CompareTo(y.timestamp);
+                }
+
+                #endregion
+            }
+                       
+                       private bool running = true;
+            private FilterManager manager;
+            private List<FilterItem> filterItems;
+                       
+                       public FilterCleaner(FilterManager enclosingInstance)
+                       {
+                this.manager = enclosingInstance;
+                filterItems = new List<FilterItem>();
+            }
+                       
+                       public virtual void  Run()
+                       {
+                               while (running)
+                               {
+                                       
+                                       // sort items from oldest to newest 
+                                       // we delete the oldest filters 
+                    if (this.manager.cache.Count > this.manager.cacheCleanSize)
+                                       {
+                                               // empty the temporary set
+                                               filterItems.Clear();
+                        lock (this.manager.cache.SyncRoot)
+                                               {
+                            foreach (FilterItem item in this.manager.cache.Values)
+                            {
+                                filterItems.Add(item);
+                            }
+                            filterItems.Sort(new FilterItemComparer());
+
+                            int numToDelete = (int)((this.manager.cache.Count - this.manager.cacheCleanSize) * 1.5);
+                                                       // loop over the set and delete all of the cache entries not used in a while
+                            for(int i = 0; i < numToDelete; i++)
+                            {
+                                this.manager.cache.Remove(filterItems[i].filter.GetHashCode());
+                            }
+                                               }
+                                               // empty the set so we don't tie up the memory
+                        filterItems.Clear();
+                                       }
+                                       // take a nap
+                                       try
+                                       {
+                        System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64)10000 * this.manager.cleanSleepTime));
+                                       }
+                                       catch (System.Threading.ThreadInterruptedException ie)
+                                       {
+                                               SupportClass.ThreadClass.Current().Interrupt();
+                                               throw new System.SystemException(ie.Message, ie);
+                                       }
+                               }
+                       }
+               }
+       }
+}
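
A minimal usage sketch for the FilterManager above (illustrative, not part of this commit; the tuning values are arbitrary):

    using Mono.Lucene.Net.Search;

    class FilterManagerExample
    {
        static Filter GetSharedFilter(Filter candidate)
        {
            // GetInstance() lazily creates the singleton and starts the
            // background FilterCleaner thread on first use.
            FilterManager manager = FilterManager.GetInstance();

            // Optional tuning: allow 500 cached filters and wake the
            // cleaner every five minutes.
            manager.SetCacheSize(500);
            manager.SetCleanThreadSleepTime(1000 * 60 * 5);

            // Returns the cached filter keyed by the candidate's hash code,
            // or caches and returns the candidate itself.
            return manager.GetFilter(candidate);
        }
    }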
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FilteredDocIdSet.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FilteredDocIdSet.cs
new file mode 100644 (file)
index 0000000..411ba64
--- /dev/null
@@ -0,0 +1,107 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Abstract decorator class for a DocIdSet implementation
+       /// that provides on-demand filtering/validation
+       /// mechanism on a given DocIdSet.
+       /// 
+       /// <p/>
+       /// 
+       /// Technically, this same functionality could be achieved
+       /// with ChainedFilter (under contrib/misc), however the
+       /// with ChainedFilter (under contrib/misc); however, the
+       /// bitset for the filter.  Instead, the {@link #match}
+       /// method is invoked on-demand, per docID visited during
+       /// searching.  If you know few docIDs will be visited, and
+       /// the logic behind {@link #match} is relatively costly,
+       /// this may be a better way to filter than ChainedFilter.
+       /// 
+       /// </summary>
+       /// <seealso cref="DocIdSet">
+       /// </seealso>
+       
+       public abstract class FilteredDocIdSet:DocIdSet
+       {
+               private class AnonymousClassFilteredDocIdSetIterator:FilteredDocIdSetIterator
+               {
+                       public AnonymousClassFilteredDocIdSetIterator(FilteredDocIdSet enclosingInstance) : base(null)
+                       {
+                System.Diagnostics.Debug.Fail("Port issue:", "Let's see if we need this"); // {{Aroush-2.9}}
+                               InitBlock(enclosingInstance);
+                       }
+                       private void InitBlock(FilteredDocIdSet enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private FilteredDocIdSet enclosingInstance;
+                       public FilteredDocIdSet Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal AnonymousClassFilteredDocIdSetIterator(FilteredDocIdSet enclosingInstance, Mono.Lucene.Net.Search.DocIdSetIterator Param1):base(Param1)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       public /*protected internal*/ override bool Match(int docid)
+                       {
+                               return Enclosing_Instance.Match(docid);
+                       }
+               }
+               private DocIdSet _innerSet;
+               
+               /// <summary> Constructor.</summary>
+               /// <param name="innerSet">Underlying DocIdSet
+               /// </param>
+               public FilteredDocIdSet(DocIdSet innerSet)
+               {
+                       _innerSet = innerSet;
+               }
+               
+               /// <summary>This DocIdSet implementation is cacheable if the inner set is cacheable. </summary>
+               public override bool IsCacheable()
+               {
+                       return _innerSet.IsCacheable();
+               }
+               
+               /// <summary> Validation method to determine whether a docid should be in the result set.</summary>
+               /// <param name="docid">docid to be tested
+               /// </param>
+               /// <returns> true if input docid should be in the result set, false otherwise.
+               /// </returns>
+               public /*protected internal*/ abstract bool Match(int docid);
+               
+               /// <summary> Implementation of the contract to build a DocIdSetIterator.</summary>
+               /// <seealso cref="DocIdSetIterator">
+               /// </seealso>
+               /// <seealso cref="FilteredDocIdSetIterator">
+               /// </seealso>
+               // @Override
+               public override DocIdSetIterator Iterator()
+               {
+                       return new AnonymousClassFilteredDocIdSetIterator(this, _innerSet.Iterator());
+               }
+       }
+}
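
A sketch of the intended extension point for FilteredDocIdSet (EvenDocsOnly is an illustrative name, not part of this commit):

    using Mono.Lucene.Net.Search;

    // Keeps only even docIDs from the wrapped set; Match() is invoked
    // lazily, per docID, as the iterator advances.
    class EvenDocsOnly : FilteredDocIdSet
    {
        public EvenDocsOnly(DocIdSet inner) : base(inner)
        {
        }

        public override bool Match(int docid)
        {
            return docid % 2 == 0;
        }
    }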
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FilteredDocIdSetIterator.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FilteredDocIdSetIterator.cs
new file mode 100644 (file)
index 0000000..f1fe58c
--- /dev/null
@@ -0,0 +1,120 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Abstract decorator class of a DocIdSetIterator
+       /// implementation that provides on-demand filter/validation
+       /// mechanism on an underlying DocIdSetIterator.  See {@link
+       /// FilteredDocIdSet}.
+       /// </summary>
+       public abstract class FilteredDocIdSetIterator:DocIdSetIterator
+       {
+               protected internal DocIdSetIterator _innerIter;
+               private int doc;
+               
+               /// <summary> Constructor.</summary>
+               /// <param name="innerIter">Underlying DocIdSetIterator.
+               /// </param>
+               public FilteredDocIdSetIterator(DocIdSetIterator innerIter)
+               {
+                       if (innerIter == null)
+                       {
+                               throw new System.ArgumentException("null iterator");
+                       }
+                       _innerIter = innerIter;
+                       doc = - 1;
+               }
+               
+               /// <summary> Validation method to determine whether a docid should be in the result set.</summary>
+               /// <param name="doc">docid to be tested
+               /// </param>
+               /// <returns> true if input docid should be in the result set, false otherwise.
+               /// </returns>
+               /// <seealso cref="FilteredDocIdSetIterator(DocIdSetIterator)">
+               /// </seealso>
+               public abstract /*protected internal*/ bool Match(int doc);
+               
+               /// <deprecated> use {@link #DocID()} instead. 
+               /// </deprecated>
+        [Obsolete("use DocID() instead.")]
+               public override int Doc()
+               {
+                       return doc;
+               }
+               
+               public override int DocID()
+               {
+                       return doc;
+               }
+               
+               /// <deprecated> use {@link #NextDoc()} instead. 
+               /// </deprecated>
+        [Obsolete("use NextDoc() instead.")]
+               public override bool Next()
+               {
+                       return NextDoc() != NO_MORE_DOCS;
+               }
+               
+               public override int NextDoc()
+               {
+                       while ((doc = _innerIter.NextDoc()) != NO_MORE_DOCS)
+                       {
+                               if (Match(doc))
+                               {
+                                       return doc;
+                               }
+                       }
+                       return doc;
+               }
+               
+               /// <deprecated> use {@link #Advance(int)} instead. 
+               /// </deprecated>
+        [Obsolete("use Advance(int) instead. ")]
+               public override bool SkipTo(int n)
+               {
+                       return Advance(n) != NO_MORE_DOCS;
+               }
+               
+               public override int Advance(int target)
+               {
+                       doc = _innerIter.Advance(target);
+                       if (doc != NO_MORE_DOCS)
+                       {
+                               if (Match(doc))
+                               {
+                                       return doc;
+                               }
+                               else
+                               {
+                                       while ((doc = _innerIter.NextDoc()) != NO_MORE_DOCS)
+                                       {
+                                               if (Match(doc))
+                                               {
+                                                       return doc;
+                                               }
+                                       }
+                                       return doc;
+                               }
+                       }
+                       return doc;
+               }
+       }
+}
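
As with FilteredDocIdSet, a concrete FilteredDocIdSetIterator only needs Match(); this sketch (MinDocIdIterator is an illustrative name) drops every docID below a threshold:

    using Mono.Lucene.Net.Search;

    class MinDocIdIterator : FilteredDocIdSetIterator
    {
        private readonly int minDoc;

        public MinDocIdIterator(DocIdSetIterator inner, int minDoc) : base(inner)
        {
            this.minDoc = minDoc;
        }

        // NextDoc()/Advance() in the base class call this for each candidate
        // docID and skip over it when Match() returns false.
        public override bool Match(int doc)
        {
            return doc >= minDoc;
        }
    }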
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FilteredQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FilteredQuery.cs
new file mode 100644 (file)
index 0000000..be5ea1e
--- /dev/null
@@ -0,0 +1,335 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       
+       /// <summary> A query that applies a filter to the results of another query.
+       /// 
+       /// <p/>Note: the bits are retrieved from the filter each time this
+       /// query is used in a search - use a CachingWrapperFilter to avoid
+       /// regenerating the bits every time.
+       /// 
+       /// <p/>Created: Apr 20, 2004 8:58:29 AM
+       /// 
+       /// </summary>
+       /// <since>   1.4
+       /// </since>
+       /// <version>  $Id: FilteredQuery.java 807821 2009-08-25 21:55:49Z mikemccand $
+       /// </version>
+       /// <seealso cref="CachingWrapperFilter">
+       /// </seealso>
+       [Serializable]
+       public class FilteredQuery:Query
+       {
+               [Serializable]
+               private class AnonymousClassWeight:Weight
+               {
+                       public AnonymousClassWeight(Mono.Lucene.Net.Search.Weight weight, Mono.Lucene.Net.Search.Similarity similarity, FilteredQuery enclosingInstance)
+                       {
+                               InitBlock(weight, similarity, enclosingInstance);
+                       }
+                       private class AnonymousClassScorer:Scorer
+                       {
+                               private void  InitBlock(Mono.Lucene.Net.Search.Scorer scorer, Mono.Lucene.Net.Search.DocIdSetIterator docIdSetIterator, AnonymousClassWeight enclosingInstance)
+                               {
+                                       this.scorer = scorer;
+                                       this.docIdSetIterator = docIdSetIterator;
+                                       this.enclosingInstance = enclosingInstance;
+                               }
+                               private Mono.Lucene.Net.Search.Scorer scorer;
+                               private Mono.Lucene.Net.Search.DocIdSetIterator docIdSetIterator;
+                               private AnonymousClassWeight enclosingInstance;
+                               public AnonymousClassWeight Enclosing_Instance
+                               {
+                                       get
+                                       {
+                                               return enclosingInstance;
+                                       }
+                                       
+                               }
+                               internal AnonymousClassScorer(Mono.Lucene.Net.Search.Scorer scorer, Mono.Lucene.Net.Search.DocIdSetIterator docIdSetIterator, AnonymousClassWeight enclosingInstance, Mono.Lucene.Net.Search.Similarity Param1):base(Param1)
+                               {
+                                       InitBlock(scorer, docIdSetIterator, enclosingInstance);
+                               }
+                               
+                               private int doc = - 1;
+                               
+                               private int AdvanceToCommon(int scorerDoc, int disiDoc)
+                               {
+                                       while (scorerDoc != disiDoc)
+                                       {
+                                               if (scorerDoc < disiDoc)
+                                               {
+                                                       scorerDoc = scorer.Advance(disiDoc);
+                                               }
+                                               else
+                                               {
+                                                       disiDoc = docIdSetIterator.Advance(scorerDoc);
+                                               }
+                                       }
+                                       return scorerDoc;
+                               }
+                               
+                               /// <deprecated> use {@link #NextDoc()} instead. 
+                               /// </deprecated>
+                [Obsolete("use NextDoc() instead. ")]
+                               public override bool Next()
+                               {
+                                       return NextDoc() != NO_MORE_DOCS;
+                               }
+                               
+                               public override int NextDoc()
+                               {
+                                       int scorerDoc, disiDoc;
+                                       return doc = (disiDoc = docIdSetIterator.NextDoc()) != NO_MORE_DOCS && (scorerDoc = scorer.NextDoc()) != NO_MORE_DOCS && AdvanceToCommon(scorerDoc, disiDoc) != NO_MORE_DOCS?scorer.DocID():NO_MORE_DOCS;
+                               }
+                               
+                               /// <deprecated> use {@link #DocID()} instead. 
+                               /// </deprecated>
+                [Obsolete("use DocID() instead.")]
+                               public override int Doc()
+                               {
+                                       return scorer.Doc();
+                               }
+                               public override int DocID()
+                               {
+                                       return doc;
+                               }
+                               
+                               /// <deprecated> use {@link #Advance(int)} instead. 
+                               /// </deprecated>
+                [Obsolete("use Advance(int) instead.")]
+                               public override bool SkipTo(int i)
+                               {
+                                       return Advance(i) != NO_MORE_DOCS;
+                               }
+                               
+                               public override int Advance(int target)
+                               {
+                                       int disiDoc, scorerDoc;
+                                       return doc = (disiDoc = docIdSetIterator.Advance(target)) != NO_MORE_DOCS && (scorerDoc = scorer.Advance(disiDoc)) != NO_MORE_DOCS && AdvanceToCommon(scorerDoc, disiDoc) != NO_MORE_DOCS?scorer.DocID():NO_MORE_DOCS;
+                               }
+                               
+                               public override float Score()
+                               {
+                                       return Enclosing_Instance.Enclosing_Instance.GetBoost() * scorer.Score();
+                               }
+                               
+                               // add an explanation about whether the document was filtered
+                               public override Explanation Explain(int i)
+                               {
+                                       Explanation exp = scorer.Explain(i);
+                                       
+                                       if (docIdSetIterator.Advance(i) == i)
+                                       {
+                                               exp.SetDescription("allowed by filter: " + exp.GetDescription());
+                                               exp.SetValue(Enclosing_Instance.Enclosing_Instance.GetBoost() * exp.GetValue());
+                                       }
+                                       else
+                                       {
+                                               exp.SetDescription("removed by filter: " + exp.GetDescription());
+                                               exp.SetValue(0.0f);
+                                       }
+                                       return exp;
+                               }
+                       }
+                       private void  InitBlock(Mono.Lucene.Net.Search.Weight weight, Mono.Lucene.Net.Search.Similarity similarity, FilteredQuery enclosingInstance)
+                       {
+                               this.weight = weight;
+                               this.similarity = similarity;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private Mono.Lucene.Net.Search.Weight weight;
+                       private Mono.Lucene.Net.Search.Similarity similarity;
+                       private FilteredQuery enclosingInstance;
+                       public FilteredQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private float value_Renamed;
+                       
+                       // pass these methods through to enclosed query's weight
+                       public override float GetValue()
+                       {
+                               return value_Renamed;
+                       }
+                       public override float SumOfSquaredWeights()
+                       {
+                               return weight.SumOfSquaredWeights() * Enclosing_Instance.GetBoost() * Enclosing_Instance.GetBoost();
+                       }
+                       public override void  Normalize(float v)
+                       {
+                               weight.Normalize(v);
+                               value_Renamed = weight.GetValue() * Enclosing_Instance.GetBoost();
+                       }
+                       public override Explanation Explain(IndexReader ir, int i)
+                       {
+                               Explanation inner = weight.Explain(ir, i);
+                               if (Enclosing_Instance.GetBoost() != 1)
+                               {
+                                       Explanation preBoost = inner;
+                                       inner = new Explanation(inner.GetValue() * Enclosing_Instance.GetBoost(), "product of:");
+                                       inner.AddDetail(new Explanation(Enclosing_Instance.GetBoost(), "boost"));
+                                       inner.AddDetail(preBoost);
+                               }
+                               Filter f = Enclosing_Instance.filter;
+                               DocIdSet docIdSet = f.GetDocIdSet(ir);
+                               DocIdSetIterator docIdSetIterator = docIdSet == null?DocIdSet.EMPTY_DOCIDSET.Iterator():docIdSet.Iterator();
+                               if (docIdSetIterator == null)
+                               {
+                                       docIdSetIterator = DocIdSet.EMPTY_DOCIDSET.Iterator();
+                               }
+                               if (docIdSetIterator.Advance(i) == i)
+                               {
+                                       return inner;
+                               }
+                               else
+                               {
+                                       Explanation result = new Explanation(0.0f, "failure to match filter: " + f.ToString());
+                                       result.AddDetail(inner);
+                                       return result;
+                               }
+                       }
+                       
+                       // return this query
+                       public override Query GetQuery()
+                       {
+                               return Enclosing_Instance;
+                       }
+                       
+                       // return a filtering scorer
+                       public override Scorer Scorer(IndexReader indexReader, bool scoreDocsInOrder, bool topScorer)
+                       {
+                               Scorer scorer = weight.Scorer(indexReader, true, false);
+                               if (scorer == null)
+                               {
+                                       return null;
+                               }
+                               DocIdSet docIdSet = Enclosing_Instance.filter.GetDocIdSet(indexReader);
+                               if (docIdSet == null)
+                               {
+                                       return null;
+                               }
+                               DocIdSetIterator docIdSetIterator = docIdSet.Iterator();
+                               if (docIdSetIterator == null)
+                               {
+                                       return null;
+                               }
+                               
+                               return new AnonymousClassScorer(scorer, docIdSetIterator, this, similarity);
+                       }
+               }
+               
+               internal Query query;
+               internal Filter filter;
+               
+               /// <summary> Constructs a new query which applies a filter to the results of the original query.
+               /// Filter.getDocIdSet() will be called every time this query is used in a search.
+               /// </summary>
+               /// <param name="query"> Query to be filtered, cannot be <code>null</code>.
+               /// </param>
+               /// <param name="filter">Filter to apply to query results, cannot be <code>null</code>.
+               /// </param>
+               public FilteredQuery(Query query, Filter filter)
+               {
+                       this.query = query;
+                       this.filter = filter;
+               }
+               
+               /// <summary> Returns a Weight that applies the filter to the enclosed query's Weight.
+               /// This is accomplished by overriding the Scorer returned by the Weight.
+               /// </summary>
+               public override Weight CreateWeight(Searcher searcher)
+               {
+                       Weight weight = query.CreateWeight(searcher);
+                       Similarity similarity = query.GetSimilarity(searcher);
+                       return new AnonymousClassWeight(weight, similarity, this);
+               }
+               
+               /// <summary>Rewrites the wrapped query. </summary>
+               public override Query Rewrite(IndexReader reader)
+               {
+                       Query rewritten = query.Rewrite(reader);
+                       if (rewritten != query)
+                       {
+                               FilteredQuery clone = (FilteredQuery) this.Clone();
+                               clone.query = rewritten;
+                               return clone;
+                       }
+                       else
+                       {
+                               return this;
+                       }
+               }
+               
+               public virtual Query GetQuery()
+               {
+                       return query;
+               }
+               
+               public virtual Filter GetFilter()
+               {
+                       return filter;
+               }
+               
+               // inherit javadoc
+               public override void  ExtractTerms(System.Collections.Hashtable terms)
+               {
+                       GetQuery().ExtractTerms(terms);
+               }
+               
+               /// <summary>Prints a user-readable version of this query. </summary>
+               public override System.String ToString(System.String s)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       buffer.Append("filtered(");
+                       buffer.Append(query.ToString(s));
+                       buffer.Append(")->");
+                       buffer.Append(filter);
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       return buffer.ToString();
+               }
+               
+               /// <summary>Returns true iff <code>o</code> is equal to this. </summary>
+               public  override bool Equals(System.Object o)
+               {
+                       if (o is FilteredQuery)
+                       {
+                               FilteredQuery fq = (FilteredQuery) o;
+                               return (query.Equals(fq.query) && filter.Equals(fq.filter) && GetBoost() == fq.GetBoost());
+                       }
+                       return false;
+               }
+               
+               /// <summary>Returns a hash code value for this object. </summary>
+               public override int GetHashCode()
+               {
+                       return query.GetHashCode() ^ filter.GetHashCode() + System.Convert.ToInt32(GetBoost());
+               }
+       }
+}
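
A minimal sketch of FilteredQuery in use, assuming an existing Searcher plus any Query and Filter (the helper class below is illustrative):

    using Mono.Lucene.Net.Search;

    class FilteredQueryExample
    {
        static TopDocs SearchWithFilter(Searcher searcher, Query query, Filter filter)
        {
            // The filter's GetDocIdSet() is consulted on every search; wrap the
            // filter in a CachingWrapperFilter when it is reused across searches.
            FilteredQuery filtered = new FilteredQuery(query, filter);
            return searcher.Search(filtered, 10);
        }
    }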
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FilteredTermEnum.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FilteredTermEnum.cs
new file mode 100644 (file)
index 0000000..fab365f
--- /dev/null
@@ -0,0 +1,119 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Term = Mono.Lucene.Net.Index.Term;
+using TermEnum = Mono.Lucene.Net.Index.TermEnum;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Abstract class for enumerating a subset of all terms. 
+       /// <p/>Term enumerations are always ordered by Term.compareTo().  Each term in
+       /// the enumeration is greater than all that precede it.  
+       /// </summary>
+       public abstract class FilteredTermEnum:TermEnum
+       {
+               /// <summary>the current term </summary>
+               protected internal Term currentTerm = null;
+               
+               /// <summary>the delegate enum - to set this member use {@link #setEnum} </summary>
+               protected internal TermEnum actualEnum = null;
+               
+               public FilteredTermEnum()
+               {
+               }
+               
+               /// <summary>Equality compare on the term </summary>
+               public /*protected internal*/ abstract bool TermCompare(Term term);
+               
+               /// <summary>Equality measure on the term </summary>
+               public abstract float Difference();
+               
+               /// <summary>Indicates the end of the enumeration has been reached </summary>
+               public abstract bool EndEnum();
+               
+               /// <summary> Use this method to set the actual TermEnum (e.g. in the constructor);
+               /// it will be automatically positioned on the first matching term.
+               /// </summary>
+               protected internal virtual void  SetEnum(TermEnum actualEnum)
+               {
+                       this.actualEnum = actualEnum;
+                       // Find the first term that matches
+                       Term term = actualEnum.Term();
+                       if (term != null && TermCompare(term))
+                               currentTerm = term;
+                       else
+                               Next();
+               }
+               
+               /// <summary> Returns the docFreq of the current Term in the enumeration.
+               /// Returns -1 if no Term matches or all terms have been enumerated.
+               /// </summary>
+               public override int DocFreq()
+               {
+                       if (currentTerm == null)
+                               return - 1;
+                       System.Diagnostics.Debug.Assert(actualEnum != null);
+                       return actualEnum.DocFreq();
+               }
+               
+               /// <summary>Increments the enumeration to the next element.  True if one exists. </summary>
+               public override bool Next()
+               {
+                       if (actualEnum == null)
+                               return false; // the actual enumerator is not initialized!
+                       currentTerm = null;
+                       while (currentTerm == null)
+                       {
+                               if (EndEnum())
+                                       return false;
+                               if (actualEnum.Next())
+                               {
+                                       Term term = actualEnum.Term();
+                                       if (TermCompare(term))
+                                       {
+                                               currentTerm = term;
+                                               return true;
+                                       }
+                               }
+                               else
+                                       return false;
+                       }
+                       currentTerm = null;
+                       return false;
+               }
+               
+               /// <summary>Returns the current Term in the enumeration.
+               /// Returns null if no Term matches or all terms have been enumerated. 
+               /// </summary>
+               public override Term Term()
+               {
+                       return currentTerm;
+               }
+               
+               /// <summary>Closes the enumeration to further activity, freeing resources.  </summary>
+               public override void  Close()
+               {
+                       if (actualEnum != null)
+                               actualEnum.Close();
+                       currentTerm = null;
+                       actualEnum = null;
+               }
+       }
+}
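
A sketch of a FilteredTermEnum subclass in the spirit of Lucene's prefix enumeration (SimplePrefixTermEnum is an illustrative name; IndexReader.Terms(Term) positions the delegate enum at the first term at or after the prefix):

    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Search;

    // Enumerates only the terms of one field that start with a given prefix.
    class SimplePrefixTermEnum : FilteredTermEnum
    {
        private readonly Term prefix;
        private bool endEnum = false;

        public SimplePrefixTermEnum(IndexReader reader, Term prefix)
        {
            this.prefix = prefix;
            // SetEnum() advances to the first term for which TermCompare() is true.
            SetEnum(reader.Terms(prefix));
        }

        public override bool TermCompare(Term term)
        {
            if (term.Field() == prefix.Field() && term.Text().StartsWith(prefix.Text()))
            {
                return true;
            }
            endEnum = true; // terms are sorted, so the first mismatch ends the enumeration
            return false;
        }

        public override float Difference()
        {
            return 1.0f;
        }

        public override bool EndEnum()
        {
            return endEnum;
        }
    }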
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/ByteFieldSource.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/ByteFieldSource.cs
new file mode 100644 (file)
index 0000000..964aa24
--- /dev/null
@@ -0,0 +1,138 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using FieldCache = Mono.Lucene.Net.Search.FieldCache;
+
+namespace Mono.Lucene.Net.Search.Function
+{
+       
+       /// <summary> Expert: obtains single byte field values from the 
+       /// {@link Mono.Lucene.Net.Search.FieldCache FieldCache}
+       /// using <code>getBytes()</code> and makes those values 
+       /// available as other numeric types, casting as needed.
+       /// 
+       /// <p/><font color="#FF0000">
+       /// WARNING: The status of the <b>Search.Function</b> package is experimental. 
+       /// The APIs introduced here might change in the future and will not be 
+       /// supported anymore in such a case.</font>
+       /// 
+       /// </summary>
+       /// <seealso cref="Mono.Lucene.Net.Search.Function.FieldCacheSource"> for requirements
+       /// on the field. 
+       /// 
+       /// <p/><b>NOTE</b>: with the switch in 2.9 to segment-based
+       /// searching, if {@link #getValues} is invoked with a
+       /// composite (multi-segment) reader, this can easily cause
+       /// double RAM usage for the values in the FieldCache.  It's
+       /// best to switch your application to pass only atomic
+       /// (single segment) readers to this API.  Alternatively, for
+       /// a short-term fix, you could wrap your ValueSource using
+       /// {@link MultiValueSource}, which costs more CPU per lookup
+       /// but will not consume double the FieldCache RAM.<p/>
+       /// </seealso>
+       [Serializable]
+       public class ByteFieldSource:FieldCacheSource
+       {
+               private class AnonymousClassDocValues:DocValues
+               {
+                       public AnonymousClassDocValues(sbyte[] arr, ByteFieldSource enclosingInstance)
+                       {
+                               InitBlock(arr, enclosingInstance);
+                       }
+                       private void  InitBlock(sbyte[] arr, ByteFieldSource enclosingInstance)
+                       {
+                               this.arr = arr;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private sbyte[] arr;
+                       private ByteFieldSource enclosingInstance;
+                       public ByteFieldSource Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#floatVal(int) */
+                       public override float FloatVal(int doc)
+                       {
+                               return (float) arr[doc];
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#intVal(int) */
+                       public override int IntVal(int doc)
+                       {
+                               return arr[doc];
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#toString(int) */
+                       public override System.String ToString(int doc)
+                       {
+                               return Enclosing_Instance.Description() + '=' + IntVal(doc);
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#getInnerArray() */
+                       public /*internal*/ override System.Object GetInnerArray()
+                       {
+                               return arr;
+                       }
+               }
+               private Mono.Lucene.Net.Search.ByteParser parser;
+               
+               /// <summary> Create a cached byte field source with default string-to-byte parser. </summary>
+               public ByteFieldSource(System.String field):this(field, null)
+               {
+               }
+               
+               /// <summary> Create a cached byte field source with a specific string-to-byte parser. </summary>
+               public ByteFieldSource(System.String field, Mono.Lucene.Net.Search.ByteParser parser):base(field)
+               {
+                       this.parser = parser;
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.ValueSource#description() */
+               public override System.String Description()
+               {
+                       return "byte(" + base.Description() + ')';
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.FieldCacheSource#getCachedValues(Mono.Lucene.Net.Search.FieldCache, java.lang.String, Mono.Lucene.Net.Index.IndexReader) */
+               public override DocValues GetCachedFieldValues(FieldCache cache, System.String field, IndexReader reader)
+               {
+                       sbyte[] arr = cache.GetBytes(reader, field, parser);
+                       return new AnonymousClassDocValues(arr, this);
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.FieldCacheSource#cachedFieldSourceEquals(Mono.Lucene.Net.Search.Function.FieldCacheSource) */
+               public override bool CachedFieldSourceEquals(FieldCacheSource o)
+               {
+                       if (o.GetType() != typeof(ByteFieldSource))
+                       {
+                               return false;
+                       }
+                       ByteFieldSource other = (ByteFieldSource) o;
+                       return this.parser == null?other.parser == null:this.parser.GetType() == other.parser.GetType();
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.FieldCacheSource#cachedFieldSourceHashCode() */
+               public override int CachedFieldSourceHashCode()
+               {
+                       return parser == null?typeof(System.SByte).GetHashCode():parser.GetType().GetHashCode();
+               }
+       }
+}
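
A sketch of how ByteFieldSource feeds a function query, assuming a single-byte-per-document field named "popularity" and the ValueSourceQuery class from this package (the field name is illustrative):

    using Mono.Lucene.Net.Search;
    using Mono.Lucene.Net.Search.Function;

    class ByteFieldSourceExample
    {
        static Query PopularityQuery()
        {
            // Scores each document by the cached byte value of its
            // "popularity" field, using the default string-to-byte parser.
            ValueSource source = new ByteFieldSource("popularity");
            return new ValueSourceQuery(source);
        }
    }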
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/CustomScoreProvider.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/CustomScoreProvider.cs
new file mode 100644 (file)
index 0000000..9fd743a
--- /dev/null
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+
+using Mono.Lucene.Net.Index;
+
+namespace Mono.Lucene.Net.Search.Function
+{
+    /// <summary> An instance of this subclass should be returned by
+    /// {@link CustomScoreQuery#getCustomScoreProvider}, if you want
+    /// to modify the custom score calculation of a {@link CustomScoreQuery}.
+    /// <p/>Since Lucene 2.9, queries operate on each segment of an index separately,
+    /// so overriding the similar (now deprecated) methods in {@link CustomScoreQuery}
+    /// is no longer suitable, as the supplied <code>doc</code> ID is per-segment
+    /// and without knowledge of the IndexReader you cannot access the
+    /// document or {@link FieldCache}.
+    /// 
+    /// @lucene.experimental
+    /// @since 2.9.2
+    /// </summary>
+    public class CustomScoreProvider
+    {
+
+        protected IndexReader reader;
+
+        /// <summary>
+        /// Creates a new instance of the provider class for the given IndexReader.
+        /// </summary>
+        public CustomScoreProvider(IndexReader reader)
+        {
+            this.reader = reader;
+        }
+
+        /// <summary>
+        /// Compute a custom score by the subQuery score and a number of 
+        /// ValueSourceQuery scores.
+        /// <p/> 
+        /// Subclasses can override this method to modify the custom score.  
+        /// <p/>
+        /// If your custom scoring is different than the default herein you 
+        /// should override at least one of the two customScore() methods.
+        /// If the number of ValueSourceQueries is always &lt; 2 it is 
+        /// sufficient to override the other 
+        /// {@link #customScore(int, float, float) customScore()} 
+        /// method, which is simpler. 
+        /// <p/>
+        /// The default computation herein is a multiplication of given scores:
+        /// <pre>
+        ///     ModifiedScore = subQueryScore * valSrcScores[0] * valSrcScores[1] * ...
+        /// </pre>
+        /// </summary>
+        /// <param name="doc">id of scored doc</param>
+        /// <param name="subQueryScore">score of that doc by the subQuery</param>
+        /// <param name="valSrcScores">scores of that doc by the ValueSourceQuery</param>
+        /// <returns>custom score</returns>
+        public virtual float CustomScore(int doc, float subQueryScore, float[] valSrcScores)
+        {
+            if (valSrcScores.Length == 1)
+            {
+                return CustomScore(doc, subQueryScore, valSrcScores[0]);
+            }
+            if (valSrcScores.Length == 0)
+            {
+                return CustomScore(doc, subQueryScore, 1);
+            }
+            float score = subQueryScore;
+            for (int i = 0; i < valSrcScores.Length; i++)
+            {
+                score *= valSrcScores[i];
+            }
+            return score;
+        }
+                
+        /// <summary>
+        /// Compute a custom score by the subQuery score and the ValueSourceQuery score.
+        /// <p/> 
+        /// Subclasses can override this method to modify the custom score.
+        /// <p/>
+        /// If your custom scoring is different than the default herein you 
+        /// should override at least one of the two customScore() methods.
+        /// If the number of ValueSourceQueries is always &lt; 2 it is 
+        /// sufficient to override this customScore() method, which is simpler. 
+        /// <p/>
+        /// The default computation herein is a multiplication of the two scores:
+        /// <pre>
+        ///     ModifiedScore = subQueryScore * valSrcScore
+        /// </pre>
+        /// </summary>
+        /// <param name="doc">id of scored doc</param>
+        /// <param name="subQueryScore">score of that doc by the subQuery</param>
+        /// <param name="valSrcScore">score of that doc by the ValueSourceQuery</param>
+        /// <returns>custom score</returns>
+        public virtual float CustomScore(int doc, float subQueryScore, float valSrcScore)
+        {
+            return subQueryScore * valSrcScore;
+        }
+
+        /// <summary>
+        /// Explain the custom score.
+        /// Whenever overriding {@link #customScore(int, float, float[])}, 
+        /// this method should also be overridden to provide the correct explanation
+        /// for the part of the custom scoring.
+        /// </summary>
+        /// <param name="doc">doc being explained</param>
+        /// <param name="subQueryExpl">explanation for the sub-query part</param>
+        /// <param name="valSrcExpls">explanation for the value source part</param>
+        /// <returns>an explanation for the custom score</returns>
+        public virtual Explanation CustomExplain(int doc, Explanation subQueryExpl, Explanation[] valSrcExpls)
+        {
+            if (valSrcExpls.Length == 1)
+            {
+                return CustomExplain(doc, subQueryExpl, valSrcExpls[0]);
+            }
+            if (valSrcExpls.Length == 0)
+            {
+                return subQueryExpl;
+            }
+            float valSrcScore = 1;
+            for (int i = 0; i < valSrcExpls.Length; i++)
+            {
+                valSrcScore *= valSrcExpls[i].GetValue();
+            }
+            Explanation exp = new Explanation(valSrcScore * subQueryExpl.GetValue(), "custom score: product of:");
+            exp.AddDetail(subQueryExpl);
+            for (int i = 0; i < valSrcExpls.Length; i++)
+            {
+                exp.AddDetail(valSrcExpls[i]);
+            }
+            return exp;
+        }
+                
+        /// <summary>
+        /// Explain the custom score.
+        /// Whenever overriding {@link #customScore(int, float, float)}, 
+        /// this method should also be overridden to provide the correct explanation
+        /// for the part of the custom scoring.
+        /// 
+        /// </summary>
+        /// <param name="doc">doc being explained</param>
+        /// <param name="subQueryExpl">explanation for the sub-query part</param>
+        /// <param name="valSrcExpl">explanation for the value source part</param>
+        /// <returns>an explanation for the custom score</returns>
+        public virtual Explanation CustomExplain(int doc, Explanation subQueryExpl, Explanation valSrcExpl)
+        {
+            float valSrcScore = 1;
+            if (valSrcExpl != null)
+            {
+                valSrcScore *= valSrcExpl.GetValue();
+            }
+            Explanation exp = new Explanation(valSrcScore * subQueryExpl.GetValue(), "custom score: product of:");
+            exp.AddDetail(subQueryExpl);
+            exp.AddDetail(valSrcExpl);
+            return exp;
+        }
+
+    }
+}
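
A sketch of a CustomScoreProvider subclass that replaces the default multiplication with addition (BoostingScoreProvider is an illustrative name):

    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Search.Function;

    // Adds the value-source score to the subquery score instead of using
    // the default ModifiedScore = subQueryScore * valSrcScore.
    class BoostingScoreProvider : CustomScoreProvider
    {
        public BoostingScoreProvider(IndexReader reader) : base(reader)
        {
        }

        public override float CustomScore(int doc, float subQueryScore, float valSrcScore)
        {
            return subQueryScore + valSrcScore;
        }
    }

As the doc comments above note, a subclass overriding CustomScore() should also override the matching CustomExplain() overload so explanations stay consistent with the modified score.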
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/CustomScoreQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/CustomScoreQuery.cs
new file mode 100644 (file)
index 0000000..69fd9bf
--- /dev/null
@@ -0,0 +1,635 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+using ComplexExplanation = Mono.Lucene.Net.Search.ComplexExplanation;
+using Explanation = Mono.Lucene.Net.Search.Explanation;
+using Query = Mono.Lucene.Net.Search.Query;
+using Scorer = Mono.Lucene.Net.Search.Scorer;
+using Searcher = Mono.Lucene.Net.Search.Searcher;
+using Similarity = Mono.Lucene.Net.Search.Similarity;
+using Weight = Mono.Lucene.Net.Search.Weight;
+
+namespace Mono.Lucene.Net.Search.Function
+{
+       
+       /// <summary> Query that sets document score as a programmatic function of several (sub) scores:
+       /// <ol>
+       /// <li>the score of its subQuery (any query)</li>
+       /// <li>(optional) the score of its ValueSourceQuery (or queries).
+       /// For most simple/convenient use cases this query is likely to be a 
+       /// {@link Mono.Lucene.Net.Search.Function.FieldScoreQuery FieldScoreQuery}</li>
+       /// </ol>
+    /// Subclasses can modify the computation by overriding {@link #getCustomScoreProvider}.
+       /// 
+       /// <p/><font color="#FF0000">
+       /// WARNING: The status of the <b>Search.Function</b> package is experimental. 
+       /// The APIs introduced here might change in the future and will not be 
+       /// supported anymore in such a case.</font>
+       /// </summary>
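+       /// <example>
+       /// A minimal usage sketch (the field and term names are hypothetical, and an
+       /// already-open Searcher named <c>searcher</c> is assumed):
+       /// <code>
+       /// Query sub = new TermQuery(new Term("body", "lucene"));
+       /// ValueSourceQuery boost = new FieldScoreQuery("popularity", FieldScoreQuery.Type.FLOAT);
+       /// CustomScoreQuery q = new CustomScoreQuery(sub, boost);
+       /// // default behavior: score(doc) = subQueryScore(doc) * popularity(doc)
+       /// TopDocs hits = searcher.Search(q, 10);
+       /// </code>
+       /// </example>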
+       [Serializable]
+       public class CustomScoreQuery:Query, System.ICloneable
+       {
+               
+               private Query subQuery;
+               private ValueSourceQuery[] valSrcQueries; // never null (empty array if there are no valSrcQueries).
+               private bool strict = false; // if true, valueSource part of query does not take part in weights normalization.  
+               
+               /// <summary> Create a CustomScoreQuery over input subQuery.</summary>
+               /// <param name="subQuery">the sub query whose scored is being customed. Must not be null. 
+               /// </param>
+               public CustomScoreQuery(Query subQuery):this(subQuery, new ValueSourceQuery[0])
+               {
+               }
+               
+               /// <summary> Create a CustomScoreQuery over input subQuery and a {@link ValueSourceQuery}.</summary>
+               /// <param name="subQuery">the sub query whose score is being customed. Must not be null.
+               /// </param>
+               /// <param name="valSrcQuery">a value source query whose scores are used in the custom score
+               /// computation. For most simple/convenient use cases this would be a 
+               /// {@link Mono.Lucene.Net.Search.Function.FieldScoreQuery FieldScoreQuery}.
+        /// This parameter is optional - it can be null.
+               /// </param>
+               public CustomScoreQuery(Query subQuery, ValueSourceQuery valSrcQuery):this(subQuery, valSrcQuery != null?new ValueSourceQuery[]{valSrcQuery}:new ValueSourceQuery[0])
+               {
+               }
+               
+               /// <summary> Create a CustomScoreQuery over input subQuery and a {@link ValueSourceQuery}.</summary>
+               /// <param name="subQuery">the sub query whose score is being customized. Must not be null.
+               /// </param>
+               /// <param name="valSrcQueries">value source queries whose scores are used in the custom score
+               /// computation. For most simple/convenient use cases these would be 
+               /// {@link Mono.Lucene.Net.Search.Function.FieldScoreQuery FieldScoreQueries}.
+               /// This parameter is optional - it can be null or even an empty array.
+               /// </param>
+               public CustomScoreQuery(Query subQuery, ValueSourceQuery[] valSrcQueries)
+               {
+                       this.subQuery = subQuery;
+                       this.valSrcQueries = valSrcQueries != null?valSrcQueries:new ValueSourceQuery[0];
+                       if (subQuery == null)
+                               throw new System.ArgumentException("<subquery> must not be null!");
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Query#rewrite(Mono.Lucene.Net.Index.IndexReader) */
+        public override Query Rewrite(IndexReader reader)
+        {
+            CustomScoreQuery clone = null;
+
+            Query sq = subQuery.Rewrite(reader);
+            if (sq != subQuery)
+            {
+                clone = (CustomScoreQuery)Clone();
+                clone.subQuery = sq;
+            }
+
+            for (int i = 0; i < valSrcQueries.Length; i++)
+            {
+                ValueSourceQuery v = (ValueSourceQuery)valSrcQueries[i].Rewrite(reader);
+                if (v != valSrcQueries[i])
+                {
+                    if (clone == null) clone = (CustomScoreQuery)Clone();
+                    clone.valSrcQueries[i] = v;
+                }
+            }
+
+            return (clone == null) ? this : clone;
+        }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Query#extractTerms(java.util.Set) */
+               public override void  ExtractTerms(System.Collections.Hashtable terms)
+               {
+                       subQuery.ExtractTerms(terms);
+                       for (int i = 0; i < valSrcQueries.Length; i++)
+                       {
+                               valSrcQueries[i].ExtractTerms(terms);
+                       }
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Query#clone() */
+               public override System.Object Clone()
+               {
+                       CustomScoreQuery clone = (CustomScoreQuery) base.Clone();
+                       clone.subQuery = (Query) subQuery.Clone();
+                       clone.valSrcQueries = new ValueSourceQuery[valSrcQueries.Length];
+                       for (int i = 0; i < valSrcQueries.Length; i++)
+                       {
+                               clone.valSrcQueries[i] = (ValueSourceQuery) valSrcQueries[i].Clone();
+                       }
+                       return clone;
+               }
+               
+               /* (non-Javadoc) @see Mono.Lucene.Net.Search.Query#toString(java.lang.String) */
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder sb = new System.Text.StringBuilder(Name()).Append("(");
+                       sb.Append(subQuery.ToString(field));
+                       for (int i = 0; i < valSrcQueries.Length; i++)
+                       {
+                               sb.Append(", ").Append(valSrcQueries[i].ToString(field));
+                       }
+                       sb.Append(")");
+                       sb.Append(strict?" STRICT":"");
+                       return sb.ToString() + ToStringUtils.Boost(GetBoost());
+               }
+               
+               /// <summary>Returns true if <code>o</code> is equal to this. </summary>
+               public  override bool Equals(System.Object o)
+               {
+                       if (o == null || GetType() != o.GetType())
+                       {
+                               return false;
+                       }
+                       CustomScoreQuery other = (CustomScoreQuery) o;
+            if (this.GetBoost() != other.GetBoost() ||
+                !this.subQuery.Equals(other.subQuery) ||
+                this.strict != other.strict ||
+                this.valSrcQueries.Length != other.valSrcQueries.Length)
+            {
+                return false;
+            }
+                       for (int i = 0; i < valSrcQueries.Length; i++)
+                       {
+                               //TODO simplify with Arrays.deepEquals() once moving to Java 1.5
+                               if (!valSrcQueries[i].Equals(other.valSrcQueries[i]))
+                               {
+                                       return false;
+                               }
+                       }
+                       return true;
+               }
+               
+               /// <summary>Returns a hash code value for this object. </summary>
+               public override int GetHashCode()
+               {
+                       int valSrcHash = 0;
+                       for (int i = 0; i < valSrcQueries.Length; i++)
+                       {
+                               //TODO simplify with Arrays.deepHashcode() once moving to Java 1.5
+                               valSrcHash += valSrcQueries[i].GetHashCode();
+                       }
+            return (GetType().GetHashCode() + subQuery.GetHashCode() + valSrcHash) ^
+                BitConverter.ToInt32(BitConverter.GetBytes(GetBoost()), 0) ^ (strict ? 1234 : 4321);
+
+               }
+
+        /// <summary>
+        /// Returns a {@link CustomScoreProvider} that calculates the custom scores
+        /// for the given {@link IndexReader}. The default implementation returns a default
+        /// implementation as specified in the docs of {@link CustomScoreProvider}.
+        /// @since 2.9.2
+        /// </summary>
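+        /// <example>
+        /// A subclassing sketch (MyProvider stands for a hypothetical
+        /// CustomScoreProvider subclass):
+        /// <code>
+        /// protected override CustomScoreProvider GetCustomScoreProvider(IndexReader reader)
+        /// {
+        ///     // doc ids handed to the provider are relative to this (per-segment) reader
+        ///     return new MyProvider(reader);
+        /// }
+        /// </code>
+        /// </example>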
+        protected virtual CustomScoreProvider GetCustomScoreProvider(IndexReader reader)
+        {
+            // when deprecated methods are removed, do not extend class here, just return new default CustomScoreProvider
+            return new AnonymousCustomScoreProvider(this, reader);
+        }
+
+        class AnonymousCustomScoreProvider : CustomScoreProvider
+        {
+            CustomScoreQuery parent;
+            public AnonymousCustomScoreProvider(CustomScoreQuery parent, IndexReader reader) : base(reader)
+            {
+                this.parent = parent;
+            }
+            public override float CustomScore(int doc, float subQueryScore, float[] valSrcScores)
+            {
+                return parent.CustomScore(doc, subQueryScore, valSrcScores);
+            }
+
+            public override float CustomScore(int doc, float subQueryScore, float valSrcScore)
+            {
+                return parent.CustomScore(doc, subQueryScore, valSrcScore);
+            }
+
+            public override Explanation CustomExplain(int doc, Explanation subQueryExpl, Explanation[] valSrcExpls)
+            {
+                return parent.CustomExplain(doc, subQueryExpl, valSrcExpls);
+            }
+
+            public override Explanation CustomExplain(int doc, Explanation subQueryExpl, Explanation valSrcExpl)
+            {
+                return parent.CustomExplain(doc, subQueryExpl, valSrcExpl);
+            }
+        }
+               
+        /// <summary>
+        /// Compute a custom score by the subQuery score and a number of 
+        /// ValueSourceQuery scores.
+        /// 
+        /// The doc is relative to the current reader, which is
+        /// unknown to CustomScoreQuery when using per-segment search (since Lucene 2.9).
+        /// Please override {@link #getCustomScoreProvider} and return a subclass
+        /// of {@link CustomScoreProvider} for the given {@link IndexReader}.
+        /// see CustomScoreProvider#customScore(int,float,float[])
+        /// </summary>
+        [Obsolete("Will be removed in Lucene 3.1")]
+               public virtual float CustomScore(int doc, float subQueryScore, float[] valSrcScores)
+               {
+                       if (valSrcScores.Length == 1)
+                       {
+                               return CustomScore(doc, subQueryScore, valSrcScores[0]);
+                       }
+                       if (valSrcScores.Length == 0)
+                       {
+                               return CustomScore(doc, subQueryScore, 1);
+                       }
+                       float score = subQueryScore;
+                       for (int i = 0; i < valSrcScores.Length; i++)
+                       {
+                               score *= valSrcScores[i];
+                       }
+                       return score;
+               }
+               
+               /// <summary> Compute a custom score by the subQuery score and the ValueSourceQuery score.
+        /// 
+        /// The doc is relative to the current reader, which is
+        /// unknown to CustomScoreQuery when using per-segment search (since Lucene 2.9).
+        /// Please override {@link #getCustomScoreProvider} and return a subclass
+        /// of {@link CustomScoreProvider} for the given {@link IndexReader}.
+        /// @see CustomScoreProvider#customScore(int,float,float)
+               /// </summary>
+        [Obsolete("Will be removed in Lucene 3.1")]
+               public virtual float CustomScore(int doc, float subQueryScore, float valSrcScore)
+               {
+                       return subQueryScore * valSrcScore;
+               }
+
+               /// <summary> Explain the custom score.
+        /// 
+        /// The doc is relative to the current reader, which is
+        /// unknown to CustomScoreQuery when using per-segment search (since Lucene 2.9).
+        /// Please override {@link #getCustomScoreProvider} and return a subclass
+        /// of {@link CustomScoreProvider} for the given {@link IndexReader}.
+               /// </summary>
+        [Obsolete("Will be removed in Lucene 3.1.")]
+               public virtual Explanation CustomExplain(int doc, Explanation subQueryExpl, Explanation[] valSrcExpls)
+               {
+                       if (valSrcExpls.Length == 1)
+                       {
+                               return CustomExplain(doc, subQueryExpl, valSrcExpls[0]);
+                       }
+                       if (valSrcExpls.Length == 0)
+                       {
+                               return subQueryExpl;
+                       }
+                       float valSrcScore = 1;
+                       for (int i = 0; i < valSrcExpls.Length; i++)
+                       {
+                               valSrcScore *= valSrcExpls[i].GetValue();
+                       }
+                       Explanation exp = new Explanation(valSrcScore * subQueryExpl.GetValue(), "custom score: product of:");
+                       exp.AddDetail(subQueryExpl);
+                       for (int i = 0; i < valSrcExpls.Length; i++)
+                       {
+                               exp.AddDetail(valSrcExpls[i]);
+                       }
+                       return exp;
+               }
+               
+               /// <summary> Explain the custom score.
+        /// The doc is relative to the current reader, which is
+        /// unknown to CustomScoreQuery when using per-segment search (since Lucene 2.9).
+        /// Please override {@link #getCustomScoreProvider} and return a subclass
+        /// of {@link CustomScoreProvider} for the given {@link IndexReader}.
+               /// </summary>
+        [Obsolete("Will be removed in Lucene 3.1")]
+               public virtual Explanation CustomExplain(int doc, Explanation subQueryExpl, Explanation valSrcExpl)
+               {
+                       float valSrcScore = 1;
+                       if (valSrcExpl != null)
+                       {
+                               valSrcScore *= valSrcExpl.GetValue();
+                       }
+                       Explanation exp = new Explanation(valSrcScore * subQueryExpl.GetValue(), "custom score: product of:");
+                       exp.AddDetail(subQueryExpl);
+                       exp.AddDetail(valSrcExpl);
+                       return exp;
+               }
+               
+               //=========================== W E I G H T ============================
+               
+               [Serializable]
+               private class CustomWeight:Weight
+               {
+                       private void  InitBlock(CustomScoreQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private CustomScoreQuery enclosingInstance;
+                       public CustomScoreQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal Similarity similarity;
+                       internal Weight subQueryWeight;
+                       internal Weight[] valSrcWeights;
+                       internal bool qStrict;
+                       
+                       public CustomWeight(CustomScoreQuery enclosingInstance, Searcher searcher)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.similarity = Enclosing_Instance.GetSimilarity(searcher);
+                               this.subQueryWeight = Enclosing_Instance.subQuery.Weight(searcher);
+                               this.valSrcWeights = new Weight[Enclosing_Instance.valSrcQueries.Length];
+                               for (int i = 0; i < Enclosing_Instance.valSrcQueries.Length; i++)
+                               {
+                                       this.valSrcWeights[i] = Enclosing_Instance.valSrcQueries[i].CreateWeight(searcher);
+                               }
+                               this.qStrict = Enclosing_Instance.strict;
+                       }
+                       
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Weight#getQuery() */
+                       public override Query GetQuery()
+                       {
+                               return Enclosing_Instance;
+                       }
+                       
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Weight#getValue() */
+                       public override float GetValue()
+                       {
+                               return Enclosing_Instance.GetBoost();
+                       }
+                       
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Weight#sumOfSquaredWeights() */
+                       public override float SumOfSquaredWeights()
+                       {
+                               float sum = subQueryWeight.SumOfSquaredWeights();
+                               for (int i = 0; i < valSrcWeights.Length; i++)
+                               {
+                                       if (qStrict)
+                                       {
+                                               valSrcWeights[i].SumOfSquaredWeights(); // do not include ValueSource part in the query normalization
+                                       }
+                                       else
+                                       {
+                                               sum += valSrcWeights[i].SumOfSquaredWeights();
+                                       }
+                               }
+                               sum *= Enclosing_Instance.GetBoost() * Enclosing_Instance.GetBoost(); // boost each sub-weight
+                               return sum;
+                       }
+                       
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Weight#normalize(float) */
+                       public override void  Normalize(float norm)
+                       {
+                               norm *= Enclosing_Instance.GetBoost(); // incorporate boost
+                               subQueryWeight.Normalize(norm);
+                               for (int i = 0; i < valSrcWeights.Length; i++)
+                               {
+                                       if (qStrict)
+                                       {
+                                               valSrcWeights[i].Normalize(1); // do not normalize the ValueSource part
+                                       }
+                                       else
+                                       {
+                                               valSrcWeights[i].Normalize(norm);
+                                       }
+                               }
+                       }
+                       
+                       public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
+                       {
+                               // Pass true for "scoresDocsInOrder", because we
+                               // require in-order scoring, even if caller does not,
+                               // since we call advance on the valSrcScorers.  Pass
+                               // false for "topScorer" because we will not invoke
+                               // score(Collector) on these scorers:
+                               Scorer subQueryScorer = subQueryWeight.Scorer(reader, true, false);
+                               if (subQueryScorer == null)
+                               {
+                                       return null;
+                               }
+                               Scorer[] valSrcScorers = new Scorer[valSrcWeights.Length];
+                               for (int i = 0; i < valSrcScorers.Length; i++)
+                               {
+                                       valSrcScorers[i] = valSrcWeights[i].Scorer(reader, true, topScorer);
+                               }
+                               return new CustomScorer(enclosingInstance, similarity, reader, this, subQueryScorer, valSrcScorers);
+                       }
+                       
+                       public override Explanation Explain(IndexReader reader, int doc)
+                       {
+                               Explanation explain = DoExplain(reader, doc);
+                               return explain == null?new Explanation(0.0f, "no matching docs"):explain;
+                       }
+                       
+                       private Explanation DoExplain(IndexReader reader, int doc)
+                       {
+                               Scorer[] valSrcScorers = new Scorer[valSrcWeights.Length];
+                               for (int i = 0; i < valSrcScorers.Length; i++)
+                               {
+                                       valSrcScorers[i] = valSrcWeights[i].Scorer(reader, true, false);
+                               }
+                               Explanation subQueryExpl = subQueryWeight.Explain(reader, doc);
+                               if (!subQueryExpl.IsMatch())
+                               {
+                                       return subQueryExpl;
+                               }
+                               // match
+                               Explanation[] valSrcExpls = new Explanation[valSrcScorers.Length];
+                               for (int i = 0; i < valSrcScorers.Length; i++)
+                               {
+                                       valSrcExpls[i] = valSrcScorers[i].Explain(doc);
+                               }
+                Explanation customExp = Enclosing_Instance.GetCustomScoreProvider(reader).CustomExplain(doc, subQueryExpl, valSrcExpls);
+                               float sc = GetValue() * customExp.GetValue();
+                               Explanation res = new ComplexExplanation(true, sc, Enclosing_Instance.ToString() + ", product of:");
+                               res.AddDetail(customExp);
+                               res.AddDetail(new Explanation(GetValue(), "queryBoost")); // actually using the q boost as q weight (== weight value)
+                               return res;
+                       }
+                       
+                       public override bool ScoresDocsOutOfOrder()
+                       {
+                               return false;
+                       }
+               }
+               
+               
+               //=========================== S C O R E R ============================
+               
+               /// <summary> A scorer that applies a (callback) function on scores of the subQuery.</summary>
+               private class CustomScorer:Scorer
+               {
+                       private void  InitBlock(CustomScoreQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private CustomScoreQuery enclosingInstance;
+                       public CustomScoreQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private CustomWeight weight;
+                       private float qWeight;
+                       private Scorer subQueryScorer;
+                       private Scorer[] valSrcScorers;
+                       private IndexReader reader;
+            private CustomScoreProvider provider;
+                       private float[] vScores; // reused in score() to avoid allocating this array for each doc 
+                       
+                       // constructor
+                       internal CustomScorer(CustomScoreQuery enclosingInstance, Similarity similarity, IndexReader reader, CustomWeight w, Scorer subQueryScorer, Scorer[] valSrcScorers):base(similarity)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.weight = w;
+                               this.qWeight = w.GetValue();
+                               this.subQueryScorer = subQueryScorer;
+                               this.valSrcScorers = valSrcScorers;
+                               this.reader = reader;
+                               this.vScores = new float[valSrcScorers.Length];
+                this.provider = this.Enclosing_Instance.GetCustomScoreProvider(reader);
+                       }
+                       
+                       /// <deprecated> use {@link #NextDoc()} instead. 
+                       /// </deprecated>
+            [Obsolete("use NextDoc() instead.")]
+                       public override bool Next()
+                       {
+                               return NextDoc() != NO_MORE_DOCS;
+                       }
+                       
+                       public override int NextDoc()
+                       {
+                               int doc = subQueryScorer.NextDoc();
+                               if (doc != NO_MORE_DOCS)
+                               {
+                                       for (int i = 0; i < valSrcScorers.Length; i++)
+                                       {
+                                               valSrcScorers[i].Advance(doc);
+                                       }
+                               }
+                               return doc;
+                       }
+                       
+                       /// <deprecated> use {@link #DocID()} instead. 
+                       /// </deprecated>
+            [Obsolete("use DocID() instead.")]
+                       public override int Doc()
+                       {
+                               return subQueryScorer.Doc();
+                       }
+                       
+                       public override int DocID()
+                       {
+                               return subQueryScorer.DocID();
+                       }
+                       
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Scorer#score() */
+                       public override float Score()
+                       {
+                               for (int i = 0; i < valSrcScorers.Length; i++)
+                               {
+                                       vScores[i] = valSrcScorers[i].Score();
+                               }
+                return qWeight * provider.CustomScore(subQueryScorer.DocID(), subQueryScorer.Score(), vScores);
+                       }
+                       
+                       /// <deprecated> use {@link #Advance(int)} instead. 
+                       /// </deprecated>
+            [Obsolete("use Advance(int) instead.")]
+                       public override bool SkipTo(int target)
+                       {
+                               return Advance(target) != NO_MORE_DOCS;
+                       }
+                       
+                       public override int Advance(int target)
+                       {
+                               int doc = subQueryScorer.Advance(target);
+                               if (doc != NO_MORE_DOCS)
+                               {
+                                       for (int i = 0; i < valSrcScorers.Length; i++)
+                                       {
+                                               valSrcScorers[i].Advance(doc);
+                                       }
+                               }
+                               return doc;
+                       }
+                       
+                       // TODO: remove in 3.0
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Scorer#explain(int) */
+                       public override Explanation Explain(int doc)
+                       {
+                               Explanation subQueryExpl = weight.subQueryWeight.Explain(reader, doc);
+                               if (!subQueryExpl.IsMatch())
+                               {
+                                       return subQueryExpl;
+                               }
+                               // match
+                               Explanation[] valSrcExpls = new Explanation[valSrcScorers.Length];
+                               for (int i = 0; i < valSrcScorers.Length; i++)
+                               {
+                                       valSrcExpls[i] = valSrcScorers[i].Explain(doc);
+                               }
+                               Explanation customExp = Enclosing_Instance.CustomExplain(doc, subQueryExpl, valSrcExpls);
+                               float sc = qWeight * customExp.GetValue();
+                               Explanation res = new ComplexExplanation(true, sc, Enclosing_Instance.ToString() + ", product of:");
+                               res.AddDetail(customExp);
+                               res.AddDetail(new Explanation(qWeight, "queryBoost")); // actually using the q boost as q weight (== weight value)
+                               return res;
+                       }
+               }
+               
+               public override Weight CreateWeight(Searcher searcher)
+               {
+                       return new CustomWeight(this, searcher);
+               }
+               
+               /// <summary> Checks if this is strict custom scoring.
+               /// In strict custom scoring, the ValueSource part does not participate in weight normalization.
+               /// This may be useful when one wants full control over how scores are modified, and does 
+               /// not care about normalizing by the ValueSource part.
+               /// One particular case where this is useful is for testing this query.
+               /// <p/>
+               /// Note: only has effect when the ValueSource part is not null.
+               /// </summary>
+               public virtual bool IsStrict()
+               {
+                       return strict;
+               }
+               
+               /// <summary> Set the strict mode of this query. </summary>
+               /// <param name="strict">The strict mode to set.
+               /// </param>
+               /// <seealso cref="IsStrict()">
+               /// </seealso>
+               public virtual void  SetStrict(bool strict)
+               {
+                       this.strict = strict;
+               }
+               
+               /// <summary> A short name of this query, used in {@link #ToString(String)}.</summary>
+               public virtual System.String Name()
+               {
+                       return "custom";
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/DocValues.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/DocValues.cs
new file mode 100644 (file)
index 0000000..7aa793c
--- /dev/null
@@ -0,0 +1,203 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Explanation = Mono.Lucene.Net.Search.Explanation;
+
+namespace Mono.Lucene.Net.Search.Function
+{
+       
+       /// <summary> Expert: represents field values as different types.
+       /// Normally created via a 
+       /// {@link Mono.Lucene.Net.Search.Function.ValueSource ValueSource} 
+       /// for a particular field and reader.
+       /// 
+       /// <p/><font color="#FF0000">
+       /// WARNING: The status of the <b>Search.Function</b> package is experimental. 
+       /// The APIs introduced here might change in the future and will not be 
+       /// supported anymore in such a case.</font>
+       /// 
+       /// 
+       /// </summary>
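+       /// <example>
+       /// A minimal concrete implementation sketch, backed by a hypothetical in-memory
+       /// float array:
+       /// <code>
+       /// class ArrayDocValues : DocValues
+       /// {
+       ///     private readonly float[] vals;
+       ///     public ArrayDocValues(float[] vals) { this.vals = vals; }
+       ///     // FloatVal is the only mandatory override; IntVal/LongVal/DoubleVal/StrVal
+       ///     // default to casting this value.
+       ///     public override float FloatVal(int doc) { return vals[doc]; }
+       ///     public override string ToString(int doc) { return "val=" + vals[doc]; }
+       /// }
+       /// </code>
+       /// </example>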
+       public abstract class DocValues
+       {
+               /*
+               * DocValues is distinct from ValueSource because
+               * there needs to be an object created at query evaluation time that
+               * is not referenced by the query itself because:
+               * - Query objects should be MT safe
+               * - For caching, Query objects are often used as keys... you don't
+               *   want the Query carrying around big objects
+               */
+               
+               /// <summary> Return doc value as a float. 
+               /// <p/>Mandatory: every DocValues implementation must implement at least this method. 
+               /// </summary>
+               /// <param name="doc">document whose float value is requested. 
+               /// </param>
+               public abstract float FloatVal(int doc);
+               
+               /// <summary> Return doc value as an int. 
+               /// <p/>Optional: DocValues implementations can (but don't have to) override this method.
+               /// </summary>
+               /// <param name="doc">document whose int value is requested.
+               /// </param>
+               public virtual int IntVal(int doc)
+               {
+                       return (int) FloatVal(doc);
+               }
+               
+               /// <summary> Return doc value as a long. 
+               /// <p/>Optional: DocValues implementations can (but don't have to) override this method.
+               /// </summary>
+               /// <param name="doc">document whose long value is requested.
+               /// </param>
+               public virtual long LongVal(int doc)
+               {
+                       return (long) FloatVal(doc);
+               }
+               
+               /// <summary> Return doc value as a double. 
+               /// <p/>Optional: DocValues implementations can (but don't have to) override this method.
+               /// </summary>
+               /// <param name="doc">document whose double value is requested.
+               /// </param>
+               public virtual double DoubleVal(int doc)
+               {
+                       return (double) FloatVal(doc);
+               }
+               
+               /// <summary> Return doc value as a string. 
+               /// <p/>Optional: DocValues implementations can (but don't have to) override this method.
+               /// </summary>
+               /// <param name="doc">document whose string value is requested.
+               /// </param>
+               public virtual System.String StrVal(int doc)
+               {
+                       return FloatVal(doc).ToString();
+               }
+               
+               /// <summary> Return a string representation of a doc value, as required for Explanations.</summary>
+               public abstract System.String ToString(int doc);
+               
+               /// <summary> Explain the scoring value for the input doc.</summary>
+               public virtual Explanation Explain(int doc)
+               {
+                       return new Explanation(FloatVal(doc), ToString(doc));
+               }
+               
+               /// <summary> Expert: for test purposes only, return the inner array of values, or null if not applicable.
+               /// <p/>
+               /// Allows tests to verify that loaded values are:
+               /// <ol>
+               /// <li>indeed cached/reused.</li>
+               /// <li>stored in the expected size/type (byte/short/int/float).</li>
+               /// </ol>
+               /// Note: implementations of DocValues must override this method for 
+               /// these test elements to be tested. Otherwise the test would not fail, just 
+               /// print a warning.
+               /// </summary>
+               public /*internal*/ virtual System.Object GetInnerArray()
+               {
+                       throw new System.NotSupportedException("this optional method is for test purposes only");
+               }
+               
+               // --- some simple statistics on values
+               private float minVal = System.Single.NaN;
+               private float maxVal = System.Single.NaN;
+               private float avgVal = System.Single.NaN;
+               private bool computed = false;
+               // compute optional values
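+               // Probes FloatVal(0), FloatVal(1), ... until IndexOutOfRangeException marks
+               // the end of the values, accumulating sum/min/max; the result is memoized
+               // via the 'computed' flag.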
+               private void  Compute()
+               {
+                       if (computed)
+                       {
+                               return ;
+                       }
+                       float sum = 0;
+                       int n = 0;
+                       while (true)
+                       {
+                               float val;
+                               try
+                               {
+                                       val = FloatVal(n);
+                               }
+                               catch (System.IndexOutOfRangeException)
+                               {
+                                       break;
+                               }
+                               sum += val;
+                               minVal = System.Single.IsNaN(minVal)?val:System.Math.Min(minVal, val);
+                               maxVal = System.Single.IsNaN(maxVal)?val:System.Math.Max(maxVal, val);
+                               ++n;
+                       }
+                       
+                       avgVal = n == 0?System.Single.NaN:sum / n;
+                       computed = true;
+               }
+               
+               /// <summary> Returns the minimum of all values or <code>Float.NaN</code> if this
+               /// DocValues instance does not contain any value.
+               /// <p/>
+               /// This operation is optional
+               /// <p/>
+               /// 
+               /// </summary>
+               /// <returns> the minimum of all values or <code>Float.NaN</code> if this
+               /// DocValues instance does not contain any value.
+               /// </returns>
+               public virtual float GetMinValue()
+               {
+                       Compute();
+                       return minVal;
+               }
+               
+               /// <summary> Returns the maximum of all values or <code>Float.NaN</code> if this
+               /// DocValues instance does not contain any value.
+               /// <p/>
+               /// This operation is optional
+               /// <p/>
+               /// 
+               /// </summary>
+               /// <returns> the maximum of all values or <code>Float.NaN</code> if this
+               /// DocValues instance does not contain any value.
+               /// </returns>
+               public virtual float GetMaxValue()
+               {
+                       Compute();
+                       return maxVal;
+               }
+               
+               /// <summary> Returns the average of all values or <code>Float.NaN</code> if this
+               /// DocValues instance does not contain any value.
+               /// <p/>
+               /// This operation is optional
+               /// <p/>
+               /// 
+               /// </summary>
+               /// <returns> the average of all values or <code>Float.NaN</code> if this
+               /// DocValues instance does not contain any value
+               /// </returns>
+               public virtual float GetAverageValue()
+               {
+                       Compute();
+                       return avgVal;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/FieldCacheSource.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/FieldCacheSource.cs
new file mode 100644 (file)
index 0000000..eb042f6
--- /dev/null
@@ -0,0 +1,113 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using FieldCache = Mono.Lucene.Net.Search.FieldCache;
+
+namespace Mono.Lucene.Net.Search.Function
+{
+       
+       /// <summary> Expert: A base class for ValueSource implementations that retrieve values for
+       /// a single field from the {@link Mono.Lucene.Net.Search.FieldCache FieldCache}.
+       /// <p/>
+       /// Fields used herein must be indexed (doesn't matter if these fields are stored or not).
+       /// <p/> 
+       /// It is assumed that each such indexed field is untokenized, or at least has a single token in a document.
+       /// For documents with multiple tokens of the same field, behavior is undefined (it is likely that current 
+       /// code would use the value of one of these tokens, but this is not guaranteed).
+       /// <p/>
+       /// Documents with no tokens in this field are assigned the <code>Zero</code> value.
+       /// 
+       /// <p/><font color="#FF0000">
+       /// WARNING: The status of the <b>Search.Function</b> package is experimental. 
+       /// The APIs introduced here might change in the future and will not be 
+       /// supported anymore in such a case.</font>
+       /// 
+       /// <p/><b>NOTE</b>: with the switch in 2.9 to segment-based
+       /// searching, if {@link #getValues} is invoked with a
+       /// composite (multi-segment) reader, this can easily cause
+       /// double RAM usage for the values in the FieldCache.  It's
+       /// best to switch your application to pass only atomic
+       /// (single segment) readers to this API.  Alternatively, for
+       /// a short-term fix, you could wrap your ValueSource using
+       /// {@link MultiValueSource}, which costs more CPU per lookup
+       /// but will not consume double the FieldCache RAM.<p/>
+       /// </summary>
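+       /// <example>
+       /// Implementation sketch for the cache lookup (ArrayDocValues is the hypothetical
+       /// DocValues subclass sketched in DocValues.cs):
+       /// <code>
+       /// public override DocValues GetCachedFieldValues(FieldCache cache, System.String field, IndexReader reader)
+       /// {
+       ///     float[] arr = cache.GetFloats(reader, field); // loaded once per reader
+       ///     return new ArrayDocValues(arr);
+       /// }
+       /// </code>
+       /// Subclasses must also implement CachedFieldSourceEquals and
+       /// CachedFieldSourceHashCode so that query-level caching by equality works.
+       /// </example>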
+       [Serializable]
+       public abstract class FieldCacheSource:ValueSource
+       {
+               private System.String field;
+               
+               /// <summary> Create a cached field source for the input field.  </summary>
+               public FieldCacheSource(System.String field)
+               {
+                       this.field = field;
+               }
+               
+               /* (non-Javadoc) @see Mono.Lucene.Net.Search.Function.ValueSource#getValues(Mono.Lucene.Net.Index.IndexReader) */
+               public override DocValues GetValues(IndexReader reader)
+               {
+                       return GetCachedFieldValues(Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT, field, reader);
+               }
+               
+               /* (non-Javadoc) @see Mono.Lucene.Net.Search.Function.ValueSource#description() */
+               public override System.String Description()
+               {
+                       return field;
+               }
+               
+               /// <summary> Return cached DocValues for input field and reader.</summary>
+               /// <param name="cache">FieldCache so that values of a field are loaded once per reader (RAM allowing)
+               /// </param>
+               /// <param name="field">Field for which values are required.
+               /// </param>
+               /// <seealso cref="ValueSource">
+               /// </seealso>
+               public abstract DocValues GetCachedFieldValues(FieldCache cache, System.String field, IndexReader reader);
+               
+               /*(non-Javadoc) @see java.lang.Object#equals(java.lang.Object) */
+               public  override bool Equals(System.Object o)
+               {
+                       if (!(o is FieldCacheSource))
+                       {
+                               return false;
+                       }
+                       FieldCacheSource other = (FieldCacheSource) o;
+                       return this.field.Equals(other.field) && CachedFieldSourceEquals(other);
+               }
+               
+               /*(non-Javadoc) @see java.lang.Object#hashCode() */
+               public override int GetHashCode()
+               {
+                       return field.GetHashCode() + CachedFieldSourceHashCode();
+               }
+               
+               /// <summary> Check if equals to another {@link FieldCacheSource}, already knowing that cache and field are equal.  </summary>
+               /// <seealso cref="Object.equals(java.lang.Object)">
+               /// </seealso>
+               public abstract bool CachedFieldSourceEquals(FieldCacheSource other);
+               
+               /// <summary> Return a hash code of a {@link FieldCacheSource}, without the hash-codes of the field 
+               /// and the cache (those are taken care of elsewhere).  
+               /// </summary>
+               /// <seealso cref="Object.hashCode()">
+               /// </seealso>
+               public abstract int CachedFieldSourceHashCode();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/FieldScoreQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/FieldScoreQuery.cs
new file mode 100644 (file)
index 0000000..6b2be2a
--- /dev/null
@@ -0,0 +1,139 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search.Function
+{
+       
+       /// <summary> A query that scores each document as the value of the numeric input field.
+       /// <p/> 
+       /// The query matches all documents, and scores each document according to the numeric 
+       /// value of that field. 
+       /// <p/>
+       /// It is assumed, and expected, that:
+       /// <ul>
+       /// <li>The field used here is indexed, and has exactly 
+       /// one token in every scored document.</li> 
+       /// <li>Best if this field is un_tokenized.</li>
+       /// <li>That token is parsable to the selected type.</li>
+       /// </ul>
+       /// <p/>  
+       /// Combining this query in a FunctionQuery allows much freedom in affecting document scores.
+       /// Note, that with this freedom comes responsibility: it is more than likely that the
+       /// default Lucene scoring is superior in quality to scoring modified as explained here.
+       /// However, in some cases, and certainly for research experiments, this capability may prove useful.
+       /// <p/>
+       /// When constructing this query, select the appropriate type. That type should match the data stored in the
+       /// field. So in fact the "right" type should be selected before indexing. Type selection
+       /// has effect on the RAM usage: 
+       /// <ul>
+       /// <li>{@link Type#BYTE} consumes 1 * maxDocs bytes.</li>
+       /// <li>{@link Type#SHORT} consumes 2 * maxDocs bytes.</li>
+       /// <li>{@link Type#INT} consumes 4 * maxDocs bytes.</li>
+       /// <li>{@link Type#FLOAT} consumes 8 * maxDocs bytes.</li>
+       /// </ul>
+       /// <p/>
+       /// <b>Caching:</b>
+       /// Values for the numeric field are loaded once and cached in memory for further use with the same IndexReader. 
+       /// To take advantage of this, it is extremely important to reuse index-readers or index-searchers, 
+       /// otherwise, if for instance a new index reader is opened for each query, large penalties would be 
+       /// paid for loading the field values into memory over and over again!
+       /// 
+       /// <p/><font color="#FF0000">
+       /// WARNING: The status of the <b>Search.Function</b> package is experimental. 
+       /// The APIs introduced here might change in the future and will not be 
+       /// supported anymore in such a case.</font>
+       /// </summary>
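+       /// <example>
+       /// Usage sketch ("popularity" is a hypothetical field holding exactly one
+       /// parsable numeric token per document):
+       /// <code>
+       /// FieldScoreQuery fsq = new FieldScoreQuery("popularity", FieldScoreQuery.Type.FLOAT);
+       /// // on its own, fsq matches every document, scoring each by its popularity value;
+       /// // wrapped in a CustomScoreQuery it can boost the scores of another query.
+       /// </code>
+       /// </example>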
+       [Serializable]
+       public class FieldScoreQuery:ValueSourceQuery
+       {
+               
+               /// <summary> Type of score field, indicating how field values are interpreted/parsed.  
+               /// <p/>
+               /// The type selected at search time should match the data stored in the field. 
+               /// Different types have different RAM requirements: 
+               /// <ul>
+               /// <li>{@link #BYTE} consumes 1 * maxDocs bytes.</li>
+               /// <li>{@link #SHORT} consumes 2 * maxDocs bytes.</li>
+               /// <li>{@link #INT} consumes 4 * maxDocs bytes.</li>
+               /// <li>{@link #FLOAT} consumes 8 * maxDocs bytes.</li>
+               /// </ul>
+               /// </summary>
+               public class Type
+               {
+                       
+                       /// <summary>field values are interpreted as numeric byte values. </summary>
+                       public static readonly Type BYTE = new Type("byte");
+                       
+                       /// <summary>field values are interpreted as numeric short values. </summary>
+                       public static readonly Type SHORT = new Type("short");
+                       
+                       /// <summary>field values are interpreted as numeric int values. </summary>
+                       public static readonly Type INT = new Type("int");
+                       
+                       /// <summary>field values are interpreted as numeric float values. </summary>
+                       public static readonly Type FLOAT = new Type("float");
+                       
+                       private System.String typeName;
+                       internal Type(System.String name)
+                       {
+                               this.typeName = name;
+                       }
+                       /*(non-Javadoc) @see java.lang.Object#toString() */
+                       public override System.String ToString()
+                       {
+                               return GetType().FullName + "::" + typeName;
+                       }
+               }
+               
+               /// <summary> Create a FieldScoreQuery - a query that scores each document as the value of the numeric input field.
+               /// <p/>
+               /// The <code>type</code> param tells how to parse the field string values into a numeric score value.
+               /// </summary>
+               /// <param name="field">the numeric field to be used.
+               /// </param>
+               /// <param name="type">the type of the field: either
+               /// {@link Type#BYTE}, {@link Type#SHORT}, {@link Type#INT}, or {@link Type#FLOAT}. 
+               /// </param>
+               public FieldScoreQuery(System.String field, Type type):base(GetValueSource(field, type))
+               {
+               }
+               
+               // create the appropriate (cached) field value source.  
+               private static ValueSource GetValueSource(System.String field, Type type)
+               {
+                       if (type == Type.BYTE)
+                       {
+                               return new ByteFieldSource(field);
+                       }
+                       if (type == Type.SHORT)
+                       {
+                               return new ShortFieldSource(field);
+                       }
+                       if (type == Type.INT)
+                       {
+                               return new IntFieldSource(field);
+                       }
+                       if (type == Type.FLOAT)
+                       {
+                               return new FloatFieldSource(field);
+                       }
+                       throw new System.ArgumentException(type + " is not a known Field Score Query Type!");
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/FloatFieldSource.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/FloatFieldSource.cs
new file mode 100644 (file)
index 0000000..dae9034
--- /dev/null
@@ -0,0 +1,133 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using FieldCache = Mono.Lucene.Net.Search.FieldCache;
+
+namespace Mono.Lucene.Net.Search.Function
+{
+       
+       /// <summary> Expert: obtains float field values from the 
+       /// {@link Mono.Lucene.Net.Search.FieldCache FieldCache}
+       /// using <code>getFloats()</code> and makes those values 
+       /// available as other numeric types, casting as needed.
+       /// 
+       /// <p/><font color="#FF0000">
+       /// WARNING: The status of the <b>Search.Function</b> package is experimental. 
+       /// The APIs introduced here might change in the future and will not be 
+       /// supported anymore in such a case.</font>
+       /// 
+       /// </summary>
+       /// <seealso cref="Mono.Lucene.Net.Search.Function.FieldCacheSource"> for requirements"
+       /// on the field.
+       /// 
+       /// <p/><b>NOTE</b>: with the switch in 2.9 to segment-based
+       /// searching, if {@link #getValues} is invoked with a
+       /// composite (multi-segment) reader, this can easily cause
+       /// double RAM usage for the values in the FieldCache.  It's
+       /// best to switch your application to pass only atomic
+       /// (single segment) readers to this API.  Alternatively, for
+       /// a short-term fix, you could wrap your ValueSource using
+       /// {@link MultiValueSource}, which costs more CPU per lookup
+       /// but will not consume double the FieldCache RAM.<p/>
+       /// </seealso>
+       [Serializable]
+       public class FloatFieldSource:FieldCacheSource
+       {
+               private class AnonymousClassDocValues:DocValues
+               {
+                       public AnonymousClassDocValues(float[] arr, FloatFieldSource enclosingInstance)
+                       {
+                               InitBlock(arr, enclosingInstance);
+                       }
+                       private void  InitBlock(float[] arr, FloatFieldSource enclosingInstance)
+                       {
+                               this.arr = arr;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private float[] arr;
+                       private FloatFieldSource enclosingInstance;
+                       public FloatFieldSource Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#floatVal(int) */
+                       public override float FloatVal(int doc)
+                       {
+                               return arr[doc];
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#toString(int) */
+                       public override System.String ToString(int doc)
+                       {
+                               return Enclosing_Instance.Description() + '=' + arr[doc];
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#getInnerArray() */
+                       public /*internal*/ override System.Object GetInnerArray()
+                       {
+                               return arr;
+                       }
+               }
+               private Mono.Lucene.Net.Search.FloatParser parser;
+               
+               /// <summary> Create a cached float field source with default string-to-float parser. </summary>
+               public FloatFieldSource(System.String field):this(field, null)
+               {
+               }
+               
+               /// <summary> Create a cached float field source with a specific string-to-float parser. </summary>
+               public FloatFieldSource(System.String field, Mono.Lucene.Net.Search.FloatParser parser):base(field)
+               {
+                       this.parser = parser;
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.ValueSource#description() */
+               public override System.String Description()
+               {
+                       return "float(" + base.Description() + ')';
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.FieldCacheSource#getCachedValues(Mono.Lucene.Net.Search.FieldCache, java.lang.String, Mono.Lucene.Net.Index.IndexReader) */
+               public override DocValues GetCachedFieldValues(FieldCache cache, System.String field, IndexReader reader)
+               {
+                       float[] arr = cache.GetFloats(reader, field, parser);
+                       return new AnonymousClassDocValues(arr, this);
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.FieldCacheSource#cachedFieldSourceEquals(Mono.Lucene.Net.Search.Function.FieldCacheSource) */
+               public override bool CachedFieldSourceEquals(FieldCacheSource o)
+               {
+                       if (o.GetType() != typeof(FloatFieldSource))
+                       {
+                               return false;
+                       }
+                       FloatFieldSource other = (FloatFieldSource) o;
+                       return this.parser == null?other.parser == null:this.parser.GetType() == other.parser.GetType();
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.FieldCacheSource#cachedFieldSourceHashCode() */
+               public override int CachedFieldSourceHashCode()
+               {
+                       return parser == null?typeof(System.Single).GetHashCode():parser.GetType().GetHashCode();
+               }
+       }
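+       
+       // Illustrative sketch (uncompiled): the helper class and the "price" field
+       // are assumptions for illustration only.
+       static class Demo_FloatFieldSource
+       {
+               // Scores every document by the cached float value of its "price" field.
+               internal static TopDocs Run(Searcher searcher, int n)
+               {
+                       ValueSource src = new FloatFieldSource("price"); // default string-to-float parser
+                       return searcher.Search(new ValueSourceQuery(src), n);
+               }
+       }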
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/IntFieldSource.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/IntFieldSource.cs
new file mode 100644 (file)
index 0000000..81115a6
--- /dev/null
@@ -0,0 +1,138 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using FieldCache = Mono.Lucene.Net.Search.FieldCache;
+
+namespace Mono.Lucene.Net.Search.Function
+{
+       
+       /// <summary> Expert: obtains int field values from the 
+       /// {@link Mono.Lucene.Net.Search.FieldCache FieldCache}
+       /// using <code>getInts()</code> and makes those values 
+       /// available as other numeric types, casting as needed.
+       /// 
+       /// <p/><font color="#FF0000">
+       /// WARNING: The status of the <b>Search.Function</b> package is experimental. 
+       /// The APIs introduced here might change in the future and will not be 
+       /// supported anymore in such a case.</font>
+       /// 
+       /// </summary>
+       /// <seealso cref="Mono.Lucene.Net.Search.Function.FieldCacheSource"> for requirements
+       /// on the field.
+       /// 
+       /// <p/><b>NOTE</b>: with the switch in 2.9 to segment-based
+       /// searching, if {@link #getValues} is invoked with a
+       /// composite (multi-segment) reader, this can easily cause
+       /// double RAM usage for the values in the FieldCache.  It's
+       /// best to switch your application to pass only atomic
+       /// (single segment) readers to this API.  Alternatively, for
+       /// a short-term fix, you could wrap your ValueSource using
+       /// {@link MultiValueSource}, which costs more CPU per lookup
+       /// but will not consume double the FieldCache RAM.<p/>
+       /// </seealso>
+       [Serializable]
+       public class IntFieldSource:FieldCacheSource
+       {
+               private class AnonymousClassDocValues:DocValues
+               {
+                       public AnonymousClassDocValues(int[] arr, IntFieldSource enclosingInstance)
+                       {
+                               InitBlock(arr, enclosingInstance);
+                       }
+                       private void  InitBlock(int[] arr, IntFieldSource enclosingInstance)
+                       {
+                               this.arr = arr;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private int[] arr;
+                       private IntFieldSource enclosingInstance;
+                       public IntFieldSource Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#floatVal(int) */
+                       public override float FloatVal(int doc)
+                       {
+                               return (float) arr[doc];
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#intVal(int) */
+                       public override int IntVal(int doc)
+                       {
+                               return arr[doc];
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#toString(int) */
+                       public override System.String ToString(int doc)
+                       {
+                               return Enclosing_Instance.Description() + '=' + IntVal(doc);
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#getInnerArray() */
+                       public /*internal*/ override System.Object GetInnerArray()
+                       {
+                               return arr;
+                       }
+               }
+               private Mono.Lucene.Net.Search.IntParser parser;
+               
+               /// <summary> Create a cached int field source with default string-to-int parser. </summary>
+               public IntFieldSource(System.String field):this(field, null)
+               {
+               }
+               
+               /// <summary> Create a cached int field source with a specific string-to-int parser. </summary>
+               public IntFieldSource(System.String field, Mono.Lucene.Net.Search.IntParser parser):base(field)
+               {
+                       this.parser = parser;
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.ValueSource#description() */
+               public override System.String Description()
+               {
+                       return "int(" + base.Description() + ')';
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.FieldCacheSource#getCachedValues(Mono.Lucene.Net.Search.FieldCache, java.lang.String, Mono.Lucene.Net.Index.IndexReader) */
+               public override DocValues GetCachedFieldValues(FieldCache cache, System.String field, IndexReader reader)
+               {
+                       int[] arr = cache.GetInts(reader, field, parser);
+                       return new AnonymousClassDocValues(arr, this);
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.FieldCacheSource#cachedFieldSourceEquals(Mono.Lucene.Net.Search.Function.FieldCacheSource) */
+               public override bool CachedFieldSourceEquals(FieldCacheSource o)
+               {
+                       if (o.GetType() != typeof(IntFieldSource))
+                       {
+                               return false;
+                       }
+                       IntFieldSource other = (IntFieldSource) o;
+                       return this.parser == null?other.parser == null:this.parser.GetType() == other.parser.GetType();
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.FieldCacheSource#cachedFieldSourceHashCode() */
+               public override int CachedFieldSourceHashCode()
+               {
+                       return parser == null?typeof(System.Int32).GetHashCode():parser.GetType().GetHashCode();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/MultiValueSource.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/MultiValueSource.cs
new file mode 100644 (file)
index 0000000..db8e79b
--- /dev/null
@@ -0,0 +1,165 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using ReaderUtil = Mono.Lucene.Net.Util.ReaderUtil;
+using Explanation = Mono.Lucene.Net.Search.Explanation;
+
+namespace Mono.Lucene.Net.Search.Function
+{
+       
+       /// <summary>This class wraps another ValueSource, but protects
+       /// against accidental double RAM usage in FieldCache when
+       /// a composite reader is passed to {@link #getValues}.
+       /// 
+       /// <p/><b>NOTE</b>: this class adds a CPU penalty to every
+       /// lookup, as it must resolve the incoming document to the
+       /// right sub-reader using a binary search.<p/>
+       /// 
+       /// </summary>
+       /// <deprecated> This class is temporary, to ease the
+       /// migration to segment-based searching. Please change your
+       /// code to not pass composite readers to these APIs. 
+       /// </deprecated>
+    [Obsolete("This class is temporary, to ease the migration to segment-based searching. Please change your code to not pass composite readers to these APIs. ")]
+       [Serializable]
+       public sealed class MultiValueSource:ValueSource
+       {
+               
+               internal ValueSource other;
+               public MultiValueSource(ValueSource other)
+               {
+                       this.other = other;
+               }
+               
+               public override DocValues GetValues(IndexReader reader)
+               {
+                       
+                       IndexReader[] subReaders = reader.GetSequentialSubReaders();
+                       if (subReaders != null)
+                       {
+                               // This is a composite reader
+                               return new MultiDocValues(this, subReaders);
+                       }
+                       else
+                       {
+                               // Already an atomic reader -- just delegate
+                               return other.GetValues(reader);
+                       }
+               }
+               
+               public override System.String Description()
+               {
+                       return other.Description();
+               }
+               
+               public  override bool Equals(System.Object o)
+               {
+                       if (o is MultiValueSource)
+                       {
+                               return ((MultiValueSource) o).other.Equals(other);
+                       }
+                       else
+                       {
+                               return false;
+                       }
+               }
+               
+               public override int GetHashCode()
+               {
+                       return 31 * other.GetHashCode();
+               }
+               
+               private sealed class MultiDocValues:DocValues
+               {
+                       private void  InitBlock(MultiValueSource enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private MultiValueSource enclosingInstance;
+                       public MultiValueSource Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       
+                       internal DocValues[] docValues;
+                       internal int[] docStarts;
+                       
+                       internal MultiDocValues(MultiValueSource enclosingInstance, IndexReader[] subReaders)
+                       {
+                               InitBlock(enclosingInstance);
+                               docValues = new DocValues[subReaders.Length];
+                               docStarts = new int[subReaders.Length];
+                               int base_Renamed = 0;
+                               for (int i = 0; i < subReaders.Length; i++)
+                               {
+                                       docValues[i] = Enclosing_Instance.other.GetValues(subReaders[i]);
+                                       docStarts[i] = base_Renamed;
+                                       base_Renamed += subReaders[i].MaxDoc();
+                               }
+                       }
+                       
+                       public override float FloatVal(int doc)
+                       {
+                               int n = ReaderUtil.SubIndex(doc, docStarts);
+                               return docValues[n].FloatVal(doc - docStarts[n]);
+                       }
+                       
+                       public override int IntVal(int doc)
+                       {
+                               int n = ReaderUtil.SubIndex(doc, docStarts);
+                               return docValues[n].IntVal(doc - docStarts[n]);
+                       }
+                       
+                       public override long LongVal(int doc)
+                       {
+                               int n = ReaderUtil.SubIndex(doc, docStarts);
+                               return docValues[n].LongVal(doc - docStarts[n]);
+                       }
+                       
+                       public override double DoubleVal(int doc)
+                       {
+                               int n = ReaderUtil.SubIndex(doc, docStarts);
+                               return docValues[n].DoubleVal(doc - docStarts[n]);
+                       }
+                       
+                       public override System.String StrVal(int doc)
+                       {
+                               int n = ReaderUtil.SubIndex(doc, docStarts);
+                               return docValues[n].StrVal(doc - docStarts[n]);
+                       }
+                       
+                       public override System.String ToString(int doc)
+                       {
+                               int n = ReaderUtil.SubIndex(doc, docStarts);
+                               return docValues[n].ToString(doc - docStarts[n]);
+                       }
+                       
+                       public override Explanation Explain(int doc)
+                       {
+                               int n = ReaderUtil.SubIndex(doc, docStarts);
+                               return docValues[n].Explain(doc - docStarts[n]);
+                       }
+               }
+       }
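+       
+       // Illustrative sketch (uncompiled, assumed field name): the short-term fix
+       // described above -- wrap a source so that a composite (multi-segment)
+       // reader resolves each doc to its sub-reader instead of doubling
+       // FieldCache RAM.
+       static class Demo_MultiValueSource
+       {
+               internal static ValueSource Wrap(System.String field)
+               {
+                       // Costs a binary search per lookup; compiles with an obsolescence warning.
+                       return new MultiValueSource(new IntFieldSource(field));
+               }
+       }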
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/OrdFieldSource.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/OrdFieldSource.cs
new file mode 100644 (file)
index 0000000..4322b0c
--- /dev/null
@@ -0,0 +1,148 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using FieldCache = Mono.Lucene.Net.Search.FieldCache;
+
+namespace Mono.Lucene.Net.Search.Function
+{
+       
+       /// <summary> Expert: obtains the ordinal of the field value from the default Lucene 
+       /// {@link Mono.Lucene.Net.Search.FieldCache FieldCache} using getStringIndex().
+       /// <p/>
+       /// The native lucene index order is used to assign an ordinal value for each field value.
+       /// <p/>
+       /// Field values (terms) are lexicographically ordered by unicode value, and numbered starting at 1.
+       /// <p/>
+       /// Example:
+       /// <br/>If there were only three field values: "apple","banana","pear"
+       /// <br/>then ord("apple")=1, ord("banana")=2, ord("pear")=3
+       /// <p/>
+       /// WARNING: 
+       /// ord() depends on the position in an index and can thus change 
+       /// when other documents are inserted or deleted,
+       /// or if a MultiSearcher is used. 
+       /// 
+       /// <p/><font color="#FF0000">
+       /// WARNING: The status of the <b>Search.Function</b> package is experimental. 
+       /// The APIs introduced here might change in the future and will not be 
+       /// supported anymore in such a case.</font>
+       /// 
+       /// <p/><b>NOTE</b>: with the switch in 2.9 to segment-based
+       /// searching, if {@link #getValues} is invoked with a
+       /// composite (multi-segment) reader, this can easily cause
+       /// double RAM usage for the values in the FieldCache.  It's
+       /// best to switch your application to pass only atomic
+       /// (single segment) readers to this API.  Alternatively, for
+       /// a short-term fix, you could wrap your ValueSource using
+       /// {@link MultiValueSource}, which costs more CPU per lookup
+       /// but will not consume double the FieldCache RAM.<p/>
+       /// </summary>
+       
+       [Serializable]
+       public class OrdFieldSource:ValueSource
+       {
+               private class AnonymousClassDocValues:DocValues
+               {
+                       public AnonymousClassDocValues(int[] arr, OrdFieldSource enclosingInstance)
+                       {
+                               InitBlock(arr, enclosingInstance);
+                       }
+                       private void  InitBlock(int[] arr, OrdFieldSource enclosingInstance)
+                       {
+                               this.arr = arr;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private int[] arr;
+                       private OrdFieldSource enclosingInstance;
+                       public OrdFieldSource Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#floatVal(int) */
+                       public override float FloatVal(int doc)
+                       {
+                               return (float) arr[doc];
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#strVal(int) */
+                       public override System.String StrVal(int doc)
+                       {
+                               // the string value of the ordinal, not the string itself
+                               return System.Convert.ToString(arr[doc]);
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#toString(int) */
+                       public override System.String ToString(int doc)
+                       {
+                               return Enclosing_Instance.Description() + '=' + IntVal(doc);
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#getInnerArray() */
+                       public /*internal*/ override System.Object GetInnerArray()
+                       {
+                               return arr;
+                       }
+               }
+               protected internal System.String field;
+               
+               /// <summary> Constructor for a certain field.</summary>
+               /// <param name="field">field whose values' order is used.  
+               /// </param>
+               public OrdFieldSource(System.String field)
+               {
+                       this.field = field;
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.ValueSource#description() */
+               public override System.String Description()
+               {
+                       return "ord(" + field + ')';
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.ValueSource#getValues(Mono.Lucene.Net.Index.IndexReader) */
+               public override DocValues GetValues(IndexReader reader)
+               {
+                       int[] arr = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetStringIndex(reader, field).order;
+                       return new AnonymousClassDocValues(arr, this);
+               }
+               
+               /*(non-Javadoc) @see java.lang.Object#equals(java.lang.Object) */
+               public  override bool Equals(System.Object o)
+               {
+                       if (o.GetType() != typeof(OrdFieldSource))
+                               return false;
+                       OrdFieldSource other = (OrdFieldSource) o;
+                       return this.field.Equals(other.field);
+               }
+               
+               private static readonly int hcode;
+               
+               /*(non-Javadoc) @see java.lang.Object#hashCode() */
+               public override int GetHashCode()
+               {
+                       return hcode + field.GetHashCode();
+               }
+               static OrdFieldSource()
+               {
+                       hcode = typeof(OrdFieldSource).GetHashCode();
+               }
+       }
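+       
+       // Illustrative sketch (uncompiled): per the apple/banana/pear example above,
+       // documents whose field term sorts later in the index score higher.
+       static class Demo_OrdFieldSource
+       {
+               internal static Query ByTermOrder(System.String field)
+               {
+                       return new ValueSourceQuery(new OrdFieldSource(field));
+               }
+       }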
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/Package.html b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/Package.html
new file mode 100644 (file)
index 0000000..659f35e
--- /dev/null
@@ -0,0 +1,195 @@
+<HTML>\r
+<!--\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements.  See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License.  You may obtain a copy of the License at\r
+\r
+     http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+-->\r
+<HEAD>\r
+  <TITLE>org.apache.lucene.search.function</TITLE>\r
+</HEAD>\r
+<BODY>\r
+<DIV>\r
+  Programmatic control over document scores.\r
+</DIV>\r
+<DIV>\r
+  The <code>function</code> package provides tight control over document scores.\r
+</DIV>\r
+<DIV>\r
+<font color="#FF0000">\r
+WARNING: The status of the <b>search.function</b> package is experimental. The APIs\r
+introduced here might change in the future and will not be supported anymore\r
+in such a case.\r
+</font>\r
+</DIV>\r
+<DIV>\r
+  Two types of queries are available in this package:\r
+</DIV>\r
+<DIV>\r
+  <ol>\r
+     <li>\r
+        <b>Custom Score queries</b> - allowing you to set the score\r
+        of a matching document as a mathematical expression over the scores\r
+        that document receives from its contained (sub) queries.\r
+     </li>\r
+     <li>\r
+        <b>Field score queries</b> - allowing you to base the score of a\r
+        document on <b>numeric values</b> of <b>indexed fields</b>.\r
+     </li>\r
+  </ol>\r
+</DIV>\r
+<DIV>&nbsp;</DIV>\r
+<DIV>\r
+  <b>Some possible uses of these queries:</b>\r
+</DIV>\r
+<DIV>\r
+  <ol>\r
+     <li>\r
+        Normalizing the document scores by values indexed in a special field -\r
+        for instance, experimenting with a different doc length normalization.\r
+     </li>\r
+     <li>\r
+        Introducing a static scoring element into the score of a document -\r
+        for instance, using some topological attribute of the links to/from that document.\r
+     </li>\r
+     <li>\r
+        Computing the score of a matching document as an arbitrary odd function of\r
+        its score by a certain query.\r
+     </li>\r
+  </ol>\r
+</DIV>\r
+<DIV>\r
+  <b>Performance and Quality Considerations:</b>\r
+</DIV>\r
+<DIV>\r
+  <ol>\r
+     <li>\r
+       When scoring by values of indexed fields,\r
+       these values are loaded into memory.\r
+       Unlike the regular scoring, where the required information is read from\r
+       disk as necessary, here field values are loaded once and cached by Lucene in memory\r
+       for further use, anticipating reuse by further queries. While all this is carefully\r
+       cached with performance in mind, it is recommended to\r
+       use these features only when the default Lucene scoring does\r
+       not match your "special" application needs.\r
+     </li>\r
+     <li>\r
+        Use only with carefully selected fields, because in most cases,\r
+        search quality with regular Lucene scoring\r
+        would outperform that of scoring by field values.\r
+     </li>\r
+     <li>\r
+        Values of fields used for scoring should match.\r
+        Do not apply this to a field containing arbitrary (long) text.\r
+        Do not mix values in the same field if that field is used for scoring.\r
+     </li>\r
+     <li>\r
+        Smaller (shorter) field tokens mean less RAM (something always desired).\r
+        When using <a href = FieldScoreQuery.html>FieldScoreQuery</a>,\r
+        select the shortest <a href = FieldScoreQuery.html#Type>FieldScoreQuery.Type</a>\r
+        that is sufficient for the used field values.\r
+     </li>\r
+     <li>\r
+        Reusing IndexReaders/IndexSearchers is essential, because the caching of field tokens\r
+        is based on an IndexReader. Whenever a new IndexReader is used, values currently in the cache\r
+        cannot be used and new values must be loaded from disk. So replace/refresh readers/searchers in\r
+        a controlled manner.\r
+     </li>\r
+  </ol>\r
+</DIV>\r
+<DIV>\r
+  <b>History and Credits:</b>\r
+  <ul>\r
+    <li>\r
+       A large part of the code in this package originated from Yonik's FunctionQuery code that was\r
+       imported from <a href = "http://lucene.apache.org//solr">Solr</a>\r
+       (see <a href = "http://issues.apache.org//jira/browse/LUCENE-446">LUCENE-446</a>).\r
+    </li>\r
+    <li>\r
+       The idea behind CustomScoreQuery is borrowed from\r
+       the "Easily create queries that transform sub-query scores arbitrarily" contribution by Mike Klaas\r
+       (see <a href = "http://issues.apache.org//jira/browse/LUCENE-850">LUCENE-850</a>)\r
+       though the implementation and API here are different.\r
+    </li>\r
+  </ul>\r
+</DIV>\r
+<DIV>\r
+ <b>Code sample:</b>\r
+ <P>\r
+ Note: the code snippets here should work, but they were never actually compiled; the\r
+ test sources under TestCustomScoreQuery, TestFieldScoreQuery and TestOrdValues\r
+ may also be useful.\r
+ <ol>\r
+  <li>\r
+    Using field (byte) values as scores:\r
+    <p>\r
+    Indexing:\r
+    <pre>\r
+      f = new Field("score", "7", Field.Store.NO, Field.Index.UN_TOKENIZED);\r
+      f.setOmitNorms(true);\r
+      d1.add(f);\r
+    </pre>\r
+    <p>\r
+    Search:\r
+    <pre>\r
+      Query q = new FieldScoreQuery("score", FieldScoreQuery.Type.BYTE);\r
+    </pre>\r
+    Document d1 above would get a score of 7.\r
+  </li>\r
+  <p>\r
+  <li>\r
+    Manipulating scores\r
+    <p>\r
+    Dividing the original score of each document by the square root of its docid\r
+    (just to demonstrate what it takes to manipulate scores this way)\r
+    <pre>\r
+      Query q = queryParser.parse("my query text");\r
+      CustomScoreQuery customQ = new CustomScoreQuery(q) {\r
+        public float customScore(int doc, float subQueryScore, float valSrcScore) {\r
+          return subQueryScore / Math.sqrt(doc);\r
+        }\r
+      };\r
+    </pre>\r
+        <p>\r
+        For more informative debug info on the custom query, also override the name() method:\r
+        <pre>\r
+      CustomScoreQuery customQ = new CustomScoreQuery(q) {\r
+        public float customScore(int doc, float subQueryScore, float valSrcScore) {\r
+          return subQueryScore / Math.sqrt(doc);\r
+        }\r
+        public String name() {\r
+          return "1/sqrt(docid)";\r
+        }\r
+      };\r
+    </pre>\r
+        <p>\r
+        Taking the square root of the original score and multiplying it by a "short field driven score", i.e., the\r
+        short value that was indexed for the scored doc in a certain field:\r
+        <pre>\r
+      Query q = queryParser.parse("my query text");\r
+      FieldScoreQuery qf = new FieldScoreQuery("shortScore", FieldScoreQuery.Type.SHORT);\r
+      CustomScoreQuery customQ = new CustomScoreQuery(q,qf) {\r
+        public float customScore(int doc, float subQueryScore, float valSrcScore) {\r
+          return Math.sqrt(subQueryScore) * valSrcScore;\r
+        }\r
+        public String name() {\r
+          return "shortVal*sqrt(score)";\r
+        }\r
+      };\r
+    </pre>\r
+\r
+  </li>\r
+ </ol>\r
+</DIV>\r
+</BODY>\r
+</HTML>
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/ReverseOrdFieldSource.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/ReverseOrdFieldSource.cs
new file mode 100644 (file)
index 0000000..1c05056
--- /dev/null
@@ -0,0 +1,160 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using FieldCache = Mono.Lucene.Net.Search.FieldCache;
+
+namespace Mono.Lucene.Net.Search.Function
+{
+       
+       /// <summary> Expert: obtains the ordinal of the field value from the default Lucene 
+       /// {@link Mono.Lucene.Net.Search.FieldCache FieldCache} using getStringIndex()
+       /// and reverses the order.
+       /// <p/>
+       /// The native lucene index order is used to assign an ordinal value for each field value.
+       /// <p/>
+       /// Field values (terms) are lexicographically ordered by unicode value, and numbered starting at 1.
+       /// <br/>
+       /// Example of reverse ordinal (rord):
+       /// <br/>If there were only three field values: "apple","banana","pear"
+       /// <br/>then rord("apple")=3, rord("banana")=2, rord("pear")=1
+       /// <p/>
+       /// WARNING: 
+       /// rord() depends on the position in an index and can thus change 
+       /// when other documents are inserted or deleted,
+       /// or if a MultiSearcher is used. 
+       /// 
+       /// <p/><font color="#FF0000">
+       /// WARNING: The status of the <b>Search.Function</b> package is experimental. 
+       /// The APIs introduced here might change in the future and will not be 
+       /// supported anymore in such a case.</font>
+       /// 
+       /// <p/><b>NOTE</b>: with the switch in 2.9 to segment-based
+       /// searching, if {@link #getValues} is invoked with a
+       /// composite (multi-segment) reader, this can easily cause
+       /// double RAM usage for the values in the FieldCache.  It's
+       /// best to switch your application to pass only atomic
+       /// (single segment) readers to this API.  Alternatively, for
+       /// a short-term fix, you could wrap your ValueSource using
+       /// {@link MultiValueSource}, which costs more CPU per lookup
+       /// but will not consume double the FieldCache RAM.<p/>
+       /// </summary>
+       
+       [Serializable]
+       public class ReverseOrdFieldSource:ValueSource
+       {
+               private class AnonymousClassDocValues:DocValues
+               {
+                       public AnonymousClassDocValues(int end, int[] arr, ReverseOrdFieldSource enclosingInstance)
+                       {
+                               InitBlock(end, arr, enclosingInstance);
+                       }
+                       private void  InitBlock(int end, int[] arr, ReverseOrdFieldSource enclosingInstance)
+                       {
+                               this.end = end;
+                               this.arr = arr;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private int end;
+                       private int[] arr;
+                       private ReverseOrdFieldSource enclosingInstance;
+                       public ReverseOrdFieldSource Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#floatVal(int) */
+                       public override float FloatVal(int doc)
+                       {
+                               return (float) (end - arr[doc]);
+                       }
+                       /* (non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#intVal(int) */
+                       public override int IntVal(int doc)
+                       {
+                               return end - arr[doc];
+                       }
+                       /* (non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#strVal(int) */
+                       public override System.String StrVal(int doc)
+                       {
+                               // the string value of the ordinal, not the string itself
+                               return System.Convert.ToString(IntVal(doc));
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#toString(int) */
+                       public override System.String ToString(int doc)
+                       {
+                               return Enclosing_Instance.Description() + '=' + StrVal(doc);
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#getInnerArray() */
+                       public /*internal*/ override System.Object GetInnerArray()
+                       {
+                               return arr;
+                       }
+               }
+               public System.String field;
+               
+               /// <summary> Constructor for a certain field.</summary>
+               /// <param name="field">field whose values' reverse order is used.  
+               /// </param>
+               public ReverseOrdFieldSource(System.String field)
+               {
+                       this.field = field;
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.ValueSource#description() */
+               public override System.String Description()
+               {
+                       return "rord(" + field + ')';
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.ValueSource#getValues(Mono.Lucene.Net.Index.IndexReader) */
+               public override DocValues GetValues(IndexReader reader)
+               {
+                       Mono.Lucene.Net.Search.StringIndex sindex = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetStringIndex(reader, field);
+                       
+                       int[] arr = sindex.order;
+                       int end = sindex.lookup.Length;
+                       
+                       return new AnonymousClassDocValues(end, arr, this);
+               }
+               
+               /*(non-Javadoc) @see java.lang.Object#equals(java.lang.Object) */
+               public  override bool Equals(System.Object o)
+               {
+                       if (o.GetType() != typeof(ReverseOrdFieldSource))
+                               return false;
+                       ReverseOrdFieldSource other = (ReverseOrdFieldSource) o;
+                       return this.field.Equals(other.field);
+               }
+               
+               private static readonly int hcode;
+               
+               /*(non-Javadoc) @see java.lang.Object#hashCode() */
+               public override int GetHashCode()
+               {
+                       return hcode + field.GetHashCode();
+               }
+               static ReverseOrdFieldSource()
+               {
+                       hcode = typeof(ReverseOrdFieldSource).GetHashCode();
+               }
+       }
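+       
+       // Illustrative sketch (uncompiled): the inverse of OrdFieldSource -- terms
+       // that sort first score highest (rord("apple")=3 in the example above).
+       static class Demo_ReverseOrdFieldSource
+       {
+               internal static Query ByReverseTermOrder(System.String field)
+               {
+                       return new ValueSourceQuery(new ReverseOrdFieldSource(field));
+               }
+       }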
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/ShortFieldSource.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/ShortFieldSource.cs
new file mode 100644 (file)
index 0000000..650a847
--- /dev/null
@@ -0,0 +1,138 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using FieldCache = Mono.Lucene.Net.Search.FieldCache;
+
+namespace Mono.Lucene.Net.Search.Function
+{
+       
+       /// <summary> Expert: obtains short field values from the 
+       /// {@link Mono.Lucene.Net.Search.FieldCache FieldCache}
+       /// using <code>getShorts()</code> and makes those values 
+       /// available as other numeric types, casting as needed.
+       /// 
+       /// <p/><font color="#FF0000">
+       /// WARNING: The status of the <b>Search.Function</b> package is experimental. 
+       /// The APIs introduced here might change in the future and will not be 
+       /// supported anymore in such a case.</font>
+       /// 
+       /// </summary>
+       /// <seealso cref="Mono.Lucene.Net.Search.Function.FieldCacheSource"> for requirements
+       /// on the field.
+       /// 
+       /// <p/><b>NOTE</b>: with the switch in 2.9 to segment-based
+       /// searching, if {@link #getValues} is invoked with a
+       /// composite (multi-segment) reader, this can easily cause
+       /// double RAM usage for the values in the FieldCache.  It's
+       /// best to switch your application to pass only atomic
+       /// (single segment) readers to this API.  Alternatively, for
+       /// a short-term fix, you could wrap your ValueSource using
+       /// {@link MultiValueSource}, which costs more CPU per lookup
+       /// but will not consume double the FieldCache RAM.<p/>
+       /// </seealso>
+       [Serializable]
+       public class ShortFieldSource:FieldCacheSource
+       {
+               private class AnonymousClassDocValues:DocValues
+               {
+                       public AnonymousClassDocValues(short[] arr, ShortFieldSource enclosingInstance)
+                       {
+                               InitBlock(arr, enclosingInstance);
+                       }
+                       private void  InitBlock(short[] arr, ShortFieldSource enclosingInstance)
+                       {
+                               this.arr = arr;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private short[] arr;
+                       private ShortFieldSource enclosingInstance;
+                       public ShortFieldSource Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#floatVal(int) */
+                       public override float FloatVal(int doc)
+                       {
+                               return (float) arr[doc];
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#intVal(int) */
+                       public override int IntVal(int doc)
+                       {
+                               return arr[doc];
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#toString(int) */
+                       public override System.String ToString(int doc)
+                       {
+                               return Enclosing_Instance.Description() + '=' + IntVal(doc);
+                       }
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.DocValues#getInnerArray() */
+                       public /*internal*/ override System.Object GetInnerArray()
+                       {
+                               return arr;
+                       }
+               }
+               private Mono.Lucene.Net.Search.ShortParser parser;
+               
+               /// <summary> Create a cached short field source with default string-to-short parser. </summary>
+               public ShortFieldSource(System.String field):this(field, null)
+               {
+               }
+               
+               /// <summary> Create a cached short field source with a specific string-to-short parser. </summary>
+               public ShortFieldSource(System.String field, Mono.Lucene.Net.Search.ShortParser parser):base(field)
+               {
+                       this.parser = parser;
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.ValueSource#description() */
+               public override System.String Description()
+               {
+                       return "short(" + base.Description() + ')';
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.FieldCacheSource#getCachedValues(Mono.Lucene.Net.Search.FieldCache, java.lang.String, Mono.Lucene.Net.Index.IndexReader) */
+               public override DocValues GetCachedFieldValues(FieldCache cache, System.String field, IndexReader reader)
+               {
+                       short[] arr = cache.GetShorts(reader, field, parser);
+                       return new AnonymousClassDocValues(arr, this);
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.FieldCacheSource#cachedFieldSourceEquals(Mono.Lucene.Net.Search.Function.FieldCacheSource) */
+               public override bool CachedFieldSourceEquals(FieldCacheSource o)
+               {
+                       if (o.GetType() != typeof(ShortFieldSource))
+                       {
+                               return false;
+                       }
+                       ShortFieldSource other = (ShortFieldSource) o;
+                       return this.parser == null?other.parser == null:this.parser.GetType() == other.parser.GetType();
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Function.FieldCacheSource#cachedFieldSourceHashCode() */
+               public override int CachedFieldSourceHashCode()
+               {
+                       return parser == null?typeof(System.Int16).GetHashCode():parser.GetType().GetHashCode();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/ValueSource.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/ValueSource.cs
new file mode 100644 (file)
index 0000000..45aba6a
--- /dev/null
@@ -0,0 +1,69 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search.Function
+{
+       
+       /// <summary> Expert: source of values for basic function queries.
+       /// <p/>At its default/simplest form, values - one per doc - are used as the score of that doc.
+       /// <p/>Values are instantiated as 
+       /// {@link Mono.Lucene.Net.Search.Function.DocValues DocValues} for a particular reader.
+       /// <p/>ValueSource implementations differ in RAM requirements: it would always be a factor
+       /// of the number of documents, but for each document the number of bytes can be 1, 2, 4, or 8. 
+       /// 
+       /// <p/><font color="#FF0000">
+       /// WARNING: The status of the <b>Search.Function</b> package is experimental. 
+       /// The APIs introduced here might change in the future and will not be 
+       /// supported anymore in such a case.</font>
+       /// 
+       /// 
+       /// </summary>
+       [Serializable]
+       public abstract class ValueSource
+       {
+               
+               /// <summary> Return the DocValues used by the function query.</summary>
+               /// <param name="reader">the IndexReader used to read these values.
+               /// If any caching is involved, that caching would also be IndexReader based.  
+               /// </param>
+               /// <throws>  IOException for any error. </throws>
+               public abstract DocValues GetValues(IndexReader reader);
+               
+               /// <summary> description of field, used in explain() </summary>
+               public abstract System.String Description();
+               
+               /* (non-Javadoc) @see java.lang.Object#toString() */
+               public override System.String ToString()
+               {
+                       return Description();
+               }
+               
+               /// <summary> Needed for possible caching of query results - used by {@link ValueSourceQuery#equals(Object)}.</summary>
+               /// <seealso cref="Object.equals(Object)">
+               /// </seealso>
+               abstract public  override bool Equals(System.Object o);
+               
+               /// <summary> Needed for possible caching of query results - used by {@link ValueSourceQuery#hashCode()}.</summary>
+               /// <seealso cref="Object.hashCode()">
+               /// </seealso>
+               abstract public override int GetHashCode();
+       }
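+       
+       // Illustrative sketch (uncompiled): a minimal concrete ValueSource that
+       // gives every document the same constant score, assuming -- as the field
+       // sources in this package do -- that FloatVal(int) and ToString(int) are
+       // the abstract members of DocValues.
+       [Serializable]
+       public class ConstValueSource:ValueSource
+       {
+               private class ConstDocValues:DocValues
+               {
+                       private float val;
+                       internal ConstDocValues(float val)
+                       {
+                               this.val = val;
+                       }
+                       // Same value for every document.
+                       public override float FloatVal(int doc)
+                       {
+                               return val;
+                       }
+                       public override System.String ToString(int doc)
+                       {
+                               return "const=" + val;
+                       }
+               }
+               
+               private float constant;
+               
+               public ConstValueSource(float constant)
+               {
+                       this.constant = constant;
+               }
+               
+               public override DocValues GetValues(IndexReader reader)
+               {
+                       return new ConstDocValues(constant);
+               }
+               
+               public override System.String Description()
+               {
+                       return "const(" + constant + ')';
+               }
+               
+               public override bool Equals(System.Object o)
+               {
+                       return o is ConstValueSource && ((ConstValueSource) o).constant == constant;
+               }
+               
+               public override int GetHashCode()
+               {
+                       return constant.GetHashCode();
+               }
+       }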
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/ValueSourceQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Function/ValueSourceQuery.cs
new file mode 100644 (file)
index 0000000..fd5ea33
--- /dev/null
@@ -0,0 +1,265 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using TermDocs = Mono.Lucene.Net.Index.TermDocs;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+using Mono.Lucene.Net.Search;
+
+namespace Mono.Lucene.Net.Search.Function
+{
+       
+       /// <summary> Expert: A Query that sets the scores of documents to the
+       /// values obtained from a {@link Mono.Lucene.Net.Search.Function.ValueSource ValueSource}.
+       /// <p/>
+       /// This query provides a score for <em>each and every</em> undeleted document in the index.    
+       /// <p/>
+       /// The value source can be based on a (cached) value of an indexed field, but it
+       /// can also be based on an external source, e.g. values read from an external database. 
+       /// <p/>
+       /// Score is set as: Score(doc,query) = query.getBoost()<sup>2</sup> * valueSource(doc).  
+       /// 
+       /// <p/><font color="#FF0000">
+       /// WARNING: The status of the <b>Search.Function</b> package is experimental. 
+       /// The APIs introduced here might change in the future and will not be 
+       /// supported anymore in such a case.</font>
+       /// </summary>
+       [Serializable]
+       public class ValueSourceQuery:Query
+       {
+               internal ValueSource valSrc;
+               
+               /// <summary> Create a value source query</summary>
+               /// <param name="valSrc">provides the values that define the function to be used for scoring
+               /// </param>
+               public ValueSourceQuery(ValueSource valSrc)
+               {
+                       this.valSrc = valSrc;
+               }
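+               
+               // Illustrative sketch (uncompiled): the boost is squared in the final
+               // score, i.e. Score(doc) = boost^2 * valueSource(doc).
+               public static ValueSourceQuery Boosted(ValueSource valSrc, float boost)
+               {
+                       ValueSourceQuery q = new ValueSourceQuery(valSrc);
+                       q.SetBoost(boost);
+                       return q;
+               }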
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Query#rewrite(Mono.Lucene.Net.Index.IndexReader) */
+               public override Query Rewrite(IndexReader reader)
+               {
+                       return this;
+               }
+               
+               /*(non-Javadoc) @see Mono.Lucene.Net.Search.Query#extractTerms(java.util.Set) */
+               public override void  ExtractTerms(System.Collections.Hashtable terms)
+               {
+                       // no terms involved here
+               }
+               
+               [Serializable]
+               internal class ValueSourceWeight:Weight
+               {
+                       private void  InitBlock(ValueSourceQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private ValueSourceQuery enclosingInstance;
+                       public ValueSourceQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal Similarity similarity;
+                       internal float queryNorm;
+                       internal float queryWeight;
+                       
+                       public ValueSourceWeight(ValueSourceQuery enclosingInstance, Searcher searcher)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.similarity = Enclosing_Instance.GetSimilarity(searcher);
+                       }
+                       
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Weight#getQuery() */
+                       public override Query GetQuery()
+                       {
+                               return Enclosing_Instance;
+                       }
+                       
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Weight#getValue() */
+                       public override float GetValue()
+                       {
+                               return queryWeight;
+                       }
+                       
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Weight#sumOfSquaredWeights() */
+                       public override float SumOfSquaredWeights()
+                       {
+                               queryWeight = Enclosing_Instance.GetBoost();
+                               return queryWeight * queryWeight;
+                       }
+                       
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Weight#normalize(float) */
+                       public override void  Normalize(float norm)
+                       {
+                               this.queryNorm = norm;
+                               queryWeight *= this.queryNorm;
+                       }
+                       
+                       public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
+                       {
+                               return new ValueSourceScorer(enclosingInstance, similarity, reader, this);
+                       }
+                       
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Weight#explain(Mono.Lucene.Net.Index.IndexReader, int) */
+                       public override Explanation Explain(IndexReader reader, int doc)
+                       {
+                               return new ValueSourceScorer(enclosingInstance, similarity, reader, this).Explain(doc);
+                       }
+               }
+               
+               /// <summary> A scorer that (simply) matches all documents, and scores each document with
+               /// the value of the value source in effect. For example, if the value source
+               /// is a (cached) field source, then the value of that field in the document will
+               /// be used (assuming the field is indexed for that document, with a single token).
+               /// </summary>
+               private class ValueSourceScorer:Scorer
+               {
+                       private void  InitBlock(ValueSourceQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private ValueSourceQuery enclosingInstance;
+                       public ValueSourceQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private ValueSourceWeight weight;
+                       private float qWeight;
+                       private DocValues vals;
+                       private TermDocs termDocs;
+                       private int doc = - 1;
+                       
+                       // constructor
+                       internal ValueSourceScorer(ValueSourceQuery enclosingInstance, Similarity similarity, IndexReader reader, ValueSourceWeight w):base(similarity)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.weight = w;
+                               this.qWeight = w.GetValue();
+                               // this is when/where the values are first created.
+                               vals = Enclosing_Instance.valSrc.GetValues(reader);
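+                               // TermDocs(null) enumerates every non-deleted document in the
+                               // reader, which is how this scorer "matches" all documents.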
+                               termDocs = reader.TermDocs(null);
+                       }
+                       
+                       /// <deprecated> use {@link #NextDoc()} instead. 
+                       /// </deprecated>
+            [Obsolete("use NextDoc() instead.")]
+                       public override bool Next()
+                       {
+                               return termDocs.Next();
+                       }
+                       
+                       public override int NextDoc()
+                       {
+                               return doc = termDocs.Next()?termDocs.Doc():NO_MORE_DOCS;
+                       }
+                       
+                       /// <deprecated> use {@link #DocID()} instead. 
+                       /// </deprecated>
+            [Obsolete("use DocID() instead.")]
+                       public override int Doc()
+                       {
+                               return termDocs.Doc();
+                       }
+                       
+                       public override int DocID()
+                       {
+                               return doc;
+                       }
+                       
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Scorer#score() */
+                       public override float Score()
+                       {
+                               return qWeight * vals.FloatVal(termDocs.Doc());
+                       }
+                       
+                       /// <deprecated> use {@link #Advance(int)} instead. 
+                       /// </deprecated>
+            [Obsolete("use Advance(int) instead.")]
+                       public override bool SkipTo(int target)
+                       {
+                               return termDocs.SkipTo(target);
+                       }
+                       
+                       public override int Advance(int target)
+                       {
+                               return doc = termDocs.SkipTo(target)?termDocs.Doc():NO_MORE_DOCS;
+                       }
+                       
+                       /*(non-Javadoc) @see Mono.Lucene.Net.Search.Scorer#explain(int) */
+                       public override Explanation Explain(int doc)
+                       {
+                               float sc = qWeight * vals.FloatVal(doc);
+                               
+                               Explanation result = new ComplexExplanation(true, sc, Enclosing_Instance.ToString() + ", product of:");
+                               
+                               result.AddDetail(vals.Explain(doc));
+                               result.AddDetail(new Explanation(Enclosing_Instance.GetBoost(), "boost"));
+                               result.AddDetail(new Explanation(weight.queryNorm, "queryNorm"));
+                               return result;
+                       }
+               }
+               
+               public override Weight CreateWeight(Searcher searcher)
+               {
+                       return new ValueSourceQuery.ValueSourceWeight(this, searcher);
+               }
+               
+               public override System.String ToString(System.String field)
+               {
+                       return valSrc.ToString() + ToStringUtils.Boost(GetBoost());
+               }
+               
+               /// <summary>Returns true if <code>o</code> is equal to this. </summary>
+               public override bool Equals(System.Object o)
+               {
+                       // guard against null before comparing runtime types
+                       if (o == null || GetType() != o.GetType())
+                       {
+                               return false;
+                       }
+                       ValueSourceQuery other = (ValueSourceQuery) o;
+                       return this.GetBoost() == other.GetBoost() && this.valSrc.Equals(other.valSrc);
+               }
+               
+               /// <summary>Returns a hash code value for this object. </summary>
+               public override int GetHashCode()
+               {
+                       return (GetType().GetHashCode() + valSrc.GetHashCode()) ^ BitConverter.ToInt32(BitConverter.GetBytes(GetBoost()), 0);
+               }
+
+               override public System.Object Clone()
+               {
+                       return this.MemberwiseClone();
+               }
+
+        public ValueSource valSrc_ForNUnit
+        {
+            get { return valSrc; }
+        }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FuzzyQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FuzzyQuery.cs
new file mode 100644 (file)
index 0000000..dfec005
--- /dev/null
@@ -0,0 +1,277 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using PriorityQueue = Mono.Lucene.Net.Util.PriorityQueue;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Implements the fuzzy search query. The similarity measurement
+       /// is based on the Levenshtein (edit distance) algorithm.
+       /// 
+       /// Warning: this query is not very scalable with its default prefix
+       /// length of 0 - in this case, *every* term will be enumerated and
+       /// cause an edit score calculation.
+       /// 
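+       /// <p/>Illustrative sketch (not from the original docs; field and term
+       /// values are hypothetical):
+       /// <pre>
+       /// Term term = new Term("name", "mispeled");
+       /// Query q = new FuzzyQuery(term, 0.6f, 2); // minSimilarity 0.6, common prefix of 2
+       /// </pre>
+       /// 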
+       /// </summary>
+       [Serializable]
+       public class FuzzyQuery:MultiTermQuery
+       {
+               
+               public const float defaultMinSimilarity = 0.5f;
+               public const int defaultPrefixLength = 0;
+               
+               private float minimumSimilarity;
+               private int prefixLength;
+               private bool termLongEnough = false;
+               
+               new protected internal Term term;
+               
+               /// <summary> Create a new FuzzyQuery that will match terms with a similarity 
+               /// of at least <code>minimumSimilarity</code> to <code>term</code>.
+               /// If a <code>prefixLength</code> &gt; 0 is specified, a common prefix
+               /// of that length is also required.
+               /// 
+               /// </summary>
+               /// <param name="term">the term to search for
+               /// </param>
+               /// <param name="minimumSimilarity">a value between 0 and 1 to set the required similarity
+               /// between the query term and the matching terms. For example, for a
+               /// <code>minimumSimilarity</code> of <code>0.5</code> a term of the same length
+               /// as the query term is considered similar to the query term if the edit distance
+               /// between both terms is less than <code>length(term)*0.5</code>
+               /// </param>
+               /// <param name="prefixLength">length of common (non-fuzzy) prefix
+               /// </param>
+               /// <throws>  IllegalArgumentException if minimumSimilarity is &gt;= 1 or &lt; 0,
+               /// or if prefixLength &lt; 0 </throws>
+               public FuzzyQuery(Term term, float minimumSimilarity, int prefixLength):base(term)
+               { // will be removed in 3.0
+                       this.term = term;
+                       
+                       if (minimumSimilarity >= 1.0f)
+                               throw new System.ArgumentException("minimumSimilarity >= 1");
+                       else if (minimumSimilarity < 0.0f)
+                               throw new System.ArgumentException("minimumSimilarity < 0");
+                       if (prefixLength < 0)
+                               throw new System.ArgumentException("prefixLength < 0");
+                       
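+                       // a term is "long enough" for fuzzy rewriting only when a single
+                       // edit keeps its similarity above minimumSimilarity; shorter terms
+                       // can only ever match exactly (see Rewrite below)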
+                       if (term.Text().Length > 1.0f / (1.0f - minimumSimilarity))
+                       {
+                               this.termLongEnough = true;
+                       }
+                       
+                       this.minimumSimilarity = minimumSimilarity;
+                       this.prefixLength = prefixLength;
+                       rewriteMethod = SCORING_BOOLEAN_QUERY_REWRITE;
+               }
+               
+               /// <summary> Calls {@link #FuzzyQuery(Term, float) FuzzyQuery(term, minimumSimilarity, 0)}.</summary>
+               public FuzzyQuery(Term term, float minimumSimilarity):this(term, minimumSimilarity, defaultPrefixLength)
+               {
+               }
+               
+               /// <summary> Calls {@link #FuzzyQuery(Term, float) FuzzyQuery(term, 0.5f, 0)}.</summary>
+               public FuzzyQuery(Term term):this(term, defaultMinSimilarity, defaultPrefixLength)
+               {
+               }
+               
+               /// <summary> Returns the minimum similarity that is required for this query to match.</summary>
+               /// <returns> float value between 0.0 and 1.0
+               /// </returns>
+               public virtual float GetMinSimilarity()
+               {
+                       return minimumSimilarity;
+               }
+               
+               /// <summary> Returns the non-fuzzy prefix length. This is the number of characters at the start
+               /// of a term that must be identical (not fuzzy) to the query term if the query
+               /// is to match that term. 
+               /// </summary>
+               public virtual int GetPrefixLength()
+               {
+                       return prefixLength;
+               }
+               
+               public /*protected internal*/ override FilteredTermEnum GetEnum(IndexReader reader)
+               {
+                       return new FuzzyTermEnum(reader, GetTerm(), minimumSimilarity, prefixLength);
+               }
+               
+               /// <summary> Returns the pattern term.</summary>
+        [Obsolete("Mono.Lucene.Net-2.9.1. This method overrides obsolete member Mono.Lucene.Net.Search.MultiTermQuery.GetTerm()")]
+               public override Term GetTerm()
+               {
+                       return term;
+               }
+               
+               public override void  SetRewriteMethod(RewriteMethod method)
+               {
+                       throw new System.NotSupportedException("FuzzyQuery cannot change rewrite method");
+               }
+               
+               public override Query Rewrite(IndexReader reader)
+               {
+                       if (!termLongEnough)
+                       {
+                               // can only match if it's exact
+                               return new TermQuery(term);
+                       }
+                       
+                       FilteredTermEnum enumerator = GetEnum(reader);
+                       int maxClauseCount = BooleanQuery.GetMaxClauseCount();
+                       ScoreTermQueue stQueue = new ScoreTermQueue(maxClauseCount);
+                       ScoreTerm reusableST = null;
+                       
+                       try
+                       {
+                               do 
+                               {
+                                       float score = 0.0f;
+                                       Term t = enumerator.Term();
+                                       if (t != null)
+                                       {
+                                               score = enumerator.Difference();
+                                               if (reusableST == null)
+                                               {
+                                                       reusableST = new ScoreTerm(t, score);
+                                               }
+                                               else if (score >= reusableST.score)
+                                               {
+                                                       // reusableST holds the last "rejected" entry, so, if
+                                                       // this new score is not better than that, there's no
+                                                       // need to try inserting it
+                                                       reusableST.score = score;
+                                                       reusableST.term = t;
+                                               }
+                                               else
+                                               {
+                                                       continue;
+                                               }
+                                               
+                                               reusableST = (ScoreTerm) stQueue.InsertWithOverflow(reusableST);
+                                       }
+                               }
+                               while (enumerator.Next());
+                       }
+                       finally
+                       {
+                               enumerator.Close();
+                       }
+                       
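+                       // "true" disables coord scoring: every clause already carries its
+                       // own boost scaled by the fuzzy score, so no coordination factor
+                       // should be applied across clauses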
+                       BooleanQuery query = new BooleanQuery(true);
+                       int size = stQueue.Size();
+                       for (int i = 0; i < size; i++)
+                       {
+                               ScoreTerm st = (ScoreTerm) stQueue.Pop();
+                               TermQuery tq = new TermQuery(st.term); // found a match
+                               tq.SetBoost(GetBoost() * st.score); // set the boost
+                               query.Add(tq, BooleanClause.Occur.SHOULD); // add to query
+                       }
+                       
+                       return query;
+               }
+               
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       if (!term.Field().Equals(field))
+                       {
+                               buffer.Append(term.Field());
+                               buffer.Append(":");
+                       }
+                       buffer.Append(term.Text());
+                       buffer.Append('~');
+                       buffer.Append(SupportClass.Single.ToString(minimumSimilarity));
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       return buffer.ToString();
+               }
+               
+               protected internal class ScoreTerm
+               {
+                       public Term term;
+                       public float score;
+                       
+                       public ScoreTerm(Term term, float score)
+                       {
+                               this.term = term;
+                               this.score = score;
+                       }
+               }
+               
+               protected internal class ScoreTermQueue:PriorityQueue
+               {
+                       
+                       public ScoreTermQueue(int size)
+                       {
+                               Initialize(size);
+                       }
+                       
+                       /* (non-Javadoc)
+                       * @see Mono.Lucene.Net.Util.PriorityQueue#lessThan(java.lang.Object, java.lang.Object)
+                       */
+                       public override bool LessThan(System.Object a, System.Object b)
+                       {
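+                               // min-ordering: the lowest-scoring term stays on top and is
+                               // evicted first; ties evict the lexicographically greater term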
+                               ScoreTerm termA = (ScoreTerm) a;
+                               ScoreTerm termB = (ScoreTerm) b;
+                               if (termA.score == termB.score)
+                                       return termA.term.CompareTo(termB.term) > 0;
+                               else
+                                       return termA.score < termB.score;
+                       }
+               }
+               
+               public override int GetHashCode()
+               {
+                       int prime = 31;
+                       int result = base.GetHashCode();
+                       result = prime * result + BitConverter.ToInt32(BitConverter.GetBytes(minimumSimilarity), 0);
+                       result = prime * result + prefixLength;
+                       result = prime * result + ((term == null)?0:term.GetHashCode());
+                       return result;
+               }
+               
+               public  override bool Equals(System.Object obj)
+               {
+                       if (this == obj)
+                               return true;
+                       if (!base.Equals(obj))
+                               return false;
+                       if (GetType() != obj.GetType())
+                               return false;
+                       FuzzyQuery other = (FuzzyQuery) obj;
+                       if (BitConverter.ToInt32(BitConverter.GetBytes(minimumSimilarity), 0) != BitConverter.ToInt32(BitConverter.GetBytes(other.minimumSimilarity), 0))
+                               return false;
+                       if (prefixLength != other.prefixLength)
+                               return false;
+                       if (term == null)
+                       {
+                               if (other.term != null)
+                                       return false;
+                       }
+                       else if (!term.Equals(other.term))
+                               return false;
+                       return true;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FuzzyTermEnum.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/FuzzyTermEnum.cs
new file mode 100644 (file)
index 0000000..941c25a
--- /dev/null
@@ -0,0 +1,349 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Subclass of FilteredTermEnum for enumerating all terms that are similar
+       /// to the specified filter term.
+       /// 
+       /// <p/>Term enumerations are always ordered by Term.compareTo().  Each term in
+       /// the enumeration is greater than all that precede it.
+       /// </summary>
+       public sealed class FuzzyTermEnum:FilteredTermEnum
+       {
+               
+               /* This should be somewhere around the average long word.
+               * If it is longer, we waste time and space. If it is shorter, we waste a
+               * little bit of time growing the array as we encounter longer words.
+               */
+               private const int TYPICAL_LONGEST_WORD_IN_INDEX = 19;
+               
+               /* Allows us to save the time required to create a new array
+               * every time similarity is called.
+               */
+               private int[][] d;
+               
+               private float similarity;
+               private bool endEnum = false;
+               
+               private Term searchTerm = null;
+               private System.String field;
+               private System.String text;
+               private System.String prefix;
+               
+               private float minimumSimilarity;
+               private float scale_factor;
+               private int[] maxDistances = new int[TYPICAL_LONGEST_WORD_IN_INDEX];
+               
+               /// <summary> Creates a FuzzyTermEnum with an empty prefix and a minSimilarity of 0.5f.
+               /// <p/>
+               /// After calling the constructor the enumeration is already pointing to the first 
+               /// valid term if such a term exists. 
+               /// 
+               /// </summary>
+               /// <param name="reader">
+               /// </param>
+               /// <param name="term">
+               /// </param>
+               /// <throws>  IOException </throws>
+               /// <seealso cref="FuzzyTermEnum(IndexReader, Term, float, int)">
+               /// </seealso>
+               public FuzzyTermEnum(IndexReader reader, Term term):this(reader, term, FuzzyQuery.defaultMinSimilarity, FuzzyQuery.defaultPrefixLength)
+               {
+               }
+               
+               /// <summary> Creates a FuzzyTermEnum with an empty prefix.
+               /// <p/>
+               /// After calling the constructor the enumeration is already pointing to the first 
+               /// valid term if such a term exists. 
+               /// 
+               /// </summary>
+               /// <param name="reader">
+               /// </param>
+               /// <param name="term">
+               /// </param>
+               /// <param name="minSimilarity">
+               /// </param>
+               /// <throws>  IOException </throws>
+               /// <seealso cref="FuzzyTermEnum(IndexReader, Term, float, int)">
+               /// </seealso>
+               public FuzzyTermEnum(IndexReader reader, Term term, float minSimilarity):this(reader, term, minSimilarity, FuzzyQuery.defaultPrefixLength)
+               {
+               }
+               
+               /// <summary> Constructor for enumeration of all terms from specified <code>reader</code> which share a prefix of
+               /// length <code>prefixLength</code> with <code>term</code> and which have a fuzzy similarity &gt;
+               /// <code>minSimilarity</code>.
+               /// <p/>
+               /// After calling the constructor the enumeration is already pointing to the first 
+               /// valid term if such a term exists. 
+               /// 
+               /// </summary>
+               /// <param name="reader">Delivers terms.
+               /// </param>
+               /// <param name="term">Pattern term.
+               /// </param>
+               /// <param name="minSimilarity">Minimum required similarity for terms from the reader. Default value is 0.5f.
+               /// </param>
+               /// <param name="prefixLength">Length of required common prefix. Default value is 0.
+               /// </param>
+               /// <throws>  IOException </throws>
+               public FuzzyTermEnum(IndexReader reader, Term term, float minSimilarity, int prefixLength):base()
+               {
+                       
+                       if (minSimilarity >= 1.0f)
+                               throw new System.ArgumentException("minimumSimilarity cannot be greater than or equal to 1");
+                       else if (minSimilarity < 0.0f)
+                               throw new System.ArgumentException("minimumSimilarity cannot be less than 0");
+                       if (prefixLength < 0)
+                               throw new System.ArgumentException("prefixLength cannot be less than 0");
+                       
+                       this.minimumSimilarity = minSimilarity;
+                       this.scale_factor = 1.0f / (1.0f - minimumSimilarity);
+                       this.searchTerm = term;
+                       this.field = searchTerm.Field();
+                       
+                       //The prefix could be longer than the word.
+                       //It's kind of silly though.  It means we must match the entire word.
+                       int fullSearchTermLength = searchTerm.Text().Length;
+                       int realPrefixLength = prefixLength > fullSearchTermLength?fullSearchTermLength:prefixLength;
+                       
+                       this.text = searchTerm.Text().Substring(realPrefixLength);
+                       this.prefix = searchTerm.Text().Substring(0, realPrefixLength);
+                       
+                       InitializeMaxDistances();
+                       this.d = InitDistanceArray();
+                       
+                       SetEnum(reader.Terms(new Term(searchTerm.Field(), prefix)));
+               }
+               
+               /// <summary> The termCompare method in FuzzyTermEnum uses Levenshtein distance to 
+               /// calculate the distance between the given term and the comparing term. 
+               /// </summary>
+               public /*protected internal*/ override bool TermCompare(Term term)
+               {
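+                       // the reference comparison below is intentional: the port keeps
+                       // field names interned, so identity implies equality here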
+                       if ((System.Object) field == (System.Object) term.Field() && term.Text().StartsWith(prefix))
+                       {
+                               System.String target = term.Text().Substring(prefix.Length);
+                               this.similarity = Similarity(target);
+                               return (similarity > minimumSimilarity);
+                       }
+                       endEnum = true;
+                       return false;
+               }
+               
+               public override float Difference()
+               {
+                       return (float) ((similarity - minimumSimilarity) * scale_factor);
+               }
+               
+               public override bool EndEnum()
+               {
+                       return endEnum;
+               }
+               
+               /* ***************************
+                * Compute Levenshtein distance
+                * ***************************
+                */
+               
+               /// <summary> Finds and returns the smallest of three integers </summary>
+               private static int Min(int a, int b, int c)
+               {
+                       int t = (a < b)?a:b;
+                       return (t < c)?t:c;
+               }
+               
+               private int[][] InitDistanceArray()
+               {
+                       int[][] tmpArray = new int[this.text.Length + 1][];
+                       for (int i = 0; i < this.text.Length + 1; i++)
+                       {
+                               tmpArray[i] = new int[TYPICAL_LONGEST_WORD_IN_INDEX];
+                       }
+                       return tmpArray;
+               }
+               
+               /// <summary> <p/>Similarity returns a number that is 1.0f or less (including negative numbers)
+               /// based on how similar the Term is compared to a target term.  It returns
+               /// exactly 0.0f when
+               /// <pre>
+               /// editDistance &gt; maximumEditDistance</pre>
+               /// Otherwise it returns:
+               /// <pre>
+               /// 1 - (editDistance / length)</pre>
+               /// where length is the length of the shortest term (text or target) including the
+               /// identical prefix, and editDistance is the Levenshtein distance for
+               /// the two words.<p/>
+               /// 
+               /// <p/>Embedded within this algorithm is a fail-fast Levenshtein distance
+               /// algorithm.  The fail-fast algorithm differs from the standard Levenshtein
+               /// distance algorithm in that it is aborted if it is discovered that the
+               /// minimum distance between the words is greater than some threshold.
+               /// 
+               /// <p/>To calculate the maximum distance threshold we use the following formula:
+               /// <pre>
+               /// (1 - minimumSimilarity) * length</pre>
+               /// where length is the shortest term including any prefix that is not part of the
+               /// similarity comparison.  This formula was derived by solving for what maximum value
+               /// of distance returns false for the following statements:
+               /// <pre>
+               /// similarity = 1 - ((float)distance / (float) (prefixLength + Math.min(textlen, targetlen)));
+               /// return (similarity > minimumSimilarity);</pre>
+               /// where distance is the Levenshtein distance for the two words.
+               /// <p/>
+               /// <p/>Levenshtein distance (also known as edit distance) is a measure of similarity
+               /// between two strings where the distance is measured as the number of character
+               /// deletions, insertions or substitutions required to transform one string to
+               /// the other string.
+               /// </summary>
+               /// <param name="target">the target word or phrase
+               /// </param>
+               /// <returns> the similarity,  0.0 or less indicates that it matches less than the required
+               /// threshold and 1.0 indicates that the text and target are identical
+               /// </returns>
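+        // Worked example (illustrative): with an empty prefix and
+        // minimumSimilarity = 0.5f, text "kitten" vs. target "sitting" has
+        // Levenshtein distance 3 and shortest length 6, so similarity
+        // = 1 - 3/6 = 0.5f, which TermCompare rejects because it requires
+        // similarity strictly greater than minimumSimilarity.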
+        private float Similarity(System.String target)
+        {
+
+            int m = target.Length;
+            int n = text.Length;
+            if (n == 0)
+            {
+                //we don't have anything to compare.  That means if we just add
+                //the letters for m we get the new word
+                return prefix.Length == 0 ? 0.0f : 1.0f - ((float)m / prefix.Length);
+            }
+            if (m == 0)
+            {
+                return prefix.Length == 0 ? 0.0f : 1.0f - ((float)n / prefix.Length);
+            }
+
+            int maxDistance = GetMaxDistance(m);
+
+            if (maxDistance < System.Math.Abs(m - n))
+            {
+                //just adding the characters of m to n or vice-versa results in
+                //too many edits
+                //for example "pre" length is 3 and "prefixes" length is 8.  We can see that
+                //given this optimal circumstance, the edit distance cannot be less than 5,
+                //which is 8-3, or more precisely Math.Abs(3-8).
+                //if our maximum edit distance is 4, then we can discard this word
+                //without looking at it.
+                return 0.0f;
+            }
+
+            //let's make sure we have enough room in our array to do the distance calculations.
+            if (d[0].Length <= m)
+            {
+                GrowDistanceArray(m);
+            }
+
+            // init matrix d
+            for (int i = 0; i <= n; i++)
+                d[i][0] = i;
+            for (int j = 0; j <= m; j++)
+                d[0][j] = j;
+
+            // start computing edit distance
+            for (int i = 1; i <= n; i++)
+            {
+                int bestPossibleEditDistance = m;
+                char s_i = text[i - 1];
+                for (int j = 1; j <= m; j++)
+                {
+                    if (s_i != target[j - 1])
+                    {
+                        d[i][j] = Min(d[i - 1][j], d[i][j - 1], d[i - 1][j - 1]) + 1;
+                    }
+                    else
+                    {
+                        d[i][j] = Min(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1]);
+                    }
+                    bestPossibleEditDistance = System.Math.Min(bestPossibleEditDistance, d[i][j]);
+                }
+
+                //After calculating row i, the best possible edit distance
+                //can be found by taking the smallest value in row i.
+                //If the bestPossibleEditDistance is greater than the max distance, abort.
+
+                if (i > maxDistance && bestPossibleEditDistance > maxDistance)
+                {
+                    //equal is okay, but not greater
+                    //the closest the target can be to the text is just too far away.
+                    //this target is leaving the party early.
+                    return 0.0f;
+                }
+            }
+
+            // this will return less than 0.0 when the edit distance is
+            // greater than the number of characters in the shorter word.
+            // but this was the formula that was previously used in FuzzyTermEnum,
+            // so it has not been changed (even though minimumSimilarity must be
+            // greater than 0.0)
+            return 1.0f - ((float)d[n][m] / (float)(prefix.Length + System.Math.Min(n, m)));
+
+        }
+               
+               /// <summary> Grow the second dimension of the array so that we can calculate the
+               /// Levenshtein distance for longer targets.
+               /// </summary>
+               private void  GrowDistanceArray(int m)
+               {
+                       for (int i = 0; i < d.Length; i++)
+                       {
+                               d[i] = new int[m + 1];
+                       }
+               }
+               
+               /// <summary> The max Distance is the maximum Levenshtein distance for the text
+               /// compared to some other value that results in score that is
+               /// better than the minimum similarity.
+               /// </summary>
+               /// <param name="m">the length of the "other value"
+               /// </param>
+               /// <returns> the maximum levenshtein distance that we care about
+               /// </returns>
+               private int GetMaxDistance(int m)
+               {
+                       return (m < maxDistances.Length)?maxDistances[m]:CalculateMaxDistance(m);
+               }
+               
+               private void  InitializeMaxDistances()
+               {
+                       for (int i = 0; i < maxDistances.Length; i++)
+                       {
+                               maxDistances[i] = CalculateMaxDistance(i);
+                       }
+               }
+               
+               private int CalculateMaxDistance(int m)
+               {
+                       return (int) ((1 - minimumSimilarity) * (System.Math.Min(text.Length, m) + prefix.Length));
+               }
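+               // Worked example (illustrative): minimumSimilarity = 0.5f,
+               // text = "kitten" (6 chars), prefix = "", m = 7
+               // => maxDistance = (int) (0.5f * (6 + 0)) = 3.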
+               
+               public override void  Close()
+               {
+                       base.Close(); //let the base class close the wrapped term enumerator; the garbage collector does the rest.
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Hit.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Hit.cs
new file mode 100644 (file)
index 0000000..ac6f16d
--- /dev/null
@@ -0,0 +1,145 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Document = Mono.Lucene.Net.Documents.Document;
+using CorruptIndexException = Mono.Lucene.Net.Index.CorruptIndexException;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Wrapper used by {@link HitIterator} to provide a lazily loaded hit
+       /// from {@link Hits}.
+       /// 
+       /// </summary>
+       /// <deprecated> Use {@link TopScoreDocCollector} and {@link TopDocs} instead. Hits will be removed in Lucene 3.0.
+       /// </deprecated>
+    [Obsolete("Use TopScoreDocCollector and TopDocs instead. Hits will be removed in Lucene 3.0.")]
+       [Serializable]
+       public class Hit
+       {
+               
+               private Document doc = null;
+               
+               private bool resolved = false;
+               
+               private Hits hits = null;
+               private int hitNumber;
+               
+               /// <summary> Constructed from {@link HitIterator}</summary>
+               /// <param name="hits">Hits returned from a search
+               /// </param>
+               /// <param name="hitNumber">Hit index in Hits
+               /// </param>
+               internal Hit(Hits hits, int hitNumber)
+               {
+                       this.hits = hits;
+                       this.hitNumber = hitNumber;
+               }
+               
+               /// <summary> Returns document for this hit.
+               /// 
+               /// </summary>
+               /// <seealso cref="Hits.Doc(int)">
+               /// </seealso>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual Document GetDocument()
+               {
+                       if (!resolved)
+                               FetchTheHit();
+                       return doc;
+               }
+               
+               /// <summary> Returns score for this hit.
+               /// 
+               /// </summary>
+               /// <seealso cref="Hits.Score(int)">
+               /// </seealso>
+               public virtual float GetScore()
+               {
+                       return hits.Score(hitNumber);
+               }
+               
+               /// <summary> Returns id for this hit.
+               /// 
+               /// </summary>
+               /// <seealso cref="Hits.Id(int)">
+               /// </seealso>
+               public virtual int GetId()
+               {
+                       return hits.Id(hitNumber);
+               }
+               
+               private void  FetchTheHit()
+               {
+                       doc = hits.Doc(hitNumber);
+                       resolved = true;
+               }
+               
+               // provide some of the Document style interface (the simple stuff)
+               
+               /// <summary> Returns the boost factor for this hit on any field of the underlying document.
+               /// 
+               /// </summary>
+               /// <seealso cref="Document.GetBoost()">
+               /// </seealso>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual float GetBoost()
+               {
+                       return GetDocument().GetBoost();
+               }
+               
+               /// <summary> Returns the string value of the field with the given name if any exist in
+               /// this document, or null.  If multiple fields exist with this name, this
+               /// method returns the first value added. If only binary fields with this name
+               /// exist, returns null.
+               /// 
+               /// </summary>
+               /// <seealso cref="Document.Get(String)">
+               /// </seealso>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public virtual System.String Get(System.String name)
+               {
+                       return GetDocument().Get(name);
+               }
+               
+               /// <summary> Prints the parameters to be used to discover the promised result.</summary>
+               public override System.String ToString()
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       buffer.Append("Hit<");
+                       buffer.Append(hits.ToString());
+                       buffer.Append(" [");
+                       buffer.Append(hitNumber);
+                       buffer.Append("] ");
+                       if (resolved)
+                       {
+                               buffer.Append("resolved");
+                       }
+                       else
+                       {
+                               buffer.Append("unresolved");
+                       }
+                       buffer.Append(">");
+                       return buffer.ToString();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/HitCollector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/HitCollector.cs
new file mode 100644 (file)
index 0000000..9975aa8
--- /dev/null
@@ -0,0 +1,64 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Lower-level search API. <br/>
+       /// HitCollectors are primarily meant to be used to implement queries, sorting
+       /// and filtering. See {@link Collector} for a lower level and higher performance
+       /// (on a multi-segment index) API.
+       /// 
+       /// </summary>
+       /// <seealso cref="Searcher.Search(Query,HitCollector)">
+       /// </seealso>
+       /// <version>  $Id: HitCollector.java 764551 2009-04-13 18:33:56Z mikemccand $
+       /// </version>
+       /// <deprecated> Please use {@link Collector} instead.
+       /// </deprecated>
+    [Obsolete("Please use Collector instead.")]
+       public abstract class HitCollector
+       {
+               /// <summary>Called once for every document matching a query, with the document
+               /// number and its raw score.
+               /// 
+               /// <p/>If, for example, an application wished to collect all of the hits for a
+               /// query in a BitSet, then it might:<pre>
+               /// Searcher searcher = new IndexSearcher(indexReader);
+               /// final BitSet bits = new BitSet(indexReader.maxDoc());
+               /// searcher.search(query, new HitCollector() {
+               /// public void collect(int doc, float score) {
+               /// bits.set(doc);
+               /// }
+               /// });
+               /// </pre>
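+               /// (The fragment above is the original Java sample; a C# caller would
+               /// subclass HitCollector and override <c>Collect(int doc, float score)</c>
+               /// to record matching ids.)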
+               /// 
+               /// <p/>Note: This is called in an inner search loop.  For good search
+               /// performance, implementations of this method should not call
+               /// {@link Searcher#Doc(int)} or
+               /// {@link Mono.Lucene.Net.Index.IndexReader#Document(int)} on every
+               /// document number encountered.  Doing so can slow searches by an order
+               /// of magnitude or more.
+               /// <p/>Note: The <code>score</code> passed to this method is a raw score.
+               /// In other words, the score will not necessarily be a float whose value is
+               /// between 0 and 1.
+               /// </summary>
+               public abstract void  Collect(int doc, float score);
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/HitCollectorWrapper.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/HitCollectorWrapper.cs
new file mode 100644 (file)
index 0000000..0de1e76
--- /dev/null
@@ -0,0 +1,65 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Wrapper for ({@link HitCollector}) implementations, which simply re-bases the
+       /// incoming docID before calling {@link HitCollector#collect}.
+       /// 
+       /// </summary>
+       /// <deprecated> Please migrate custom HitCollectors to the new {@link Collector}
+       /// class. This class will be removed when {@link HitCollector} is
+       /// removed.
+       /// </deprecated>
+    [Obsolete("Please migrate custom HitCollectors to the new Collector class. This class will be removed when HitCollector is removed.")]
+       public class HitCollectorWrapper:Collector
+       {
+               private HitCollector collector;
+               private int base_Renamed = 0;
+               private Scorer scorer = null;
+               
+               public HitCollectorWrapper(HitCollector collector)
+               {
+                       this.collector = collector;
+               }
+               
+               public override void  SetNextReader(IndexReader reader, int docBase)
+               {
+                       base_Renamed = docBase;
+               }
+               
+               public override void  Collect(int doc)
+               {
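+                       // re-base the segment-relative doc id to an index-wide id
+                       // before handing it to the wrapped, deprecated collector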
+                       collector.Collect(doc + base_Renamed, scorer.Score());
+               }
+               
+               public override void  SetScorer(Scorer scorer)
+               {
+                       this.scorer = scorer;
+               }
+               
+               public override bool AcceptsDocsOutOfOrder()
+               {
+                       return false;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/HitIterator.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/HitIterator.cs
new file mode 100644 (file)
index 0000000..7ab8527
--- /dev/null
@@ -0,0 +1,87 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> An iterator over {@link Hits} that provides lazy fetching of each document.
+       /// {@link Hits#Iterator()} returns an instance of this class.  Calls to {@link #next()}
+       /// return a {@link Hit} instance.
+       /// 
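+       /// <p/>Illustrative sketch (not from the original docs; assumes an existing
+       /// Hits instance obtained from a search):
+       /// <pre>
+       /// System.Collections.IEnumerator it = hits.Iterator();
+       /// while (it.MoveNext())
+       /// {
+       ///     Hit hit = (Hit) it.Current;
+       /// }
+       /// </pre>
+       /// 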
+       /// </summary>
+       /// <deprecated> Use {@link TopScoreDocCollector} and {@link TopDocs} instead. Hits will be removed in Lucene 3.0.
+       /// </deprecated>
+    [Obsolete("Use TopScoreDocCollector and TopDocs instead. Hits will be removed in Lucene 3.0.")]
+       public class HitIterator : System.Collections.IEnumerator
+       {
+               /// <summary> Returns a {@link Hit} instance representing the next hit in {@link Hits}.
+               /// 
+               /// </summary>
+               /// <returns> Next {@link Hit}.
+               /// </returns>
+               public virtual System.Object Current
+               {
+                       get
+                       {
+                               if (hitNumber == hits.Length())
+                                       throw new System.ArgumentOutOfRangeException();
+                               
+                               System.Object next = new Hit(hits, hitNumber);
+                               hitNumber++;
+                               return next;
+                       }
+                       
+               }
+               private Hits hits;
+               private int hitNumber = 0;
+               
+               /// <summary> Constructed from {@link Hits#Iterator()}.</summary>
+               internal HitIterator(Hits hits)
+               {
+                       this.hits = hits;
+               }
+               
+               /// <returns> true if current hit is less than the total number of {@link Hits}.
+               /// </returns>
+               public virtual bool MoveNext()
+               {
+                       return hitNumber < hits.Length();
+               }
+               
+               /// <summary> Unsupported operation.
+               /// 
+               /// </summary>
+               /// <throws>  UnsupportedOperationException </throws>
+               public virtual void  Remove()
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+               /// <summary> Returns the total number of hits.</summary>
+               public virtual int Length()
+               {
+                       return hits.Length();
+               }
+               
+               virtual public void  Reset()
+               {
+            System.Diagnostics.Debug.Fail("Port issue:", "Let's see if we need this HitIterator.Reset()"); // {{Aroush-2.9}}
+               }
+       }
+}
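For orientation while reviewing, a minimal consumption sketch for HitIterator through the deprecated Hits API; `searcher`, `query`, and the stored "title" field are assumptions, not part of the patch:

    Hits hits = searcher.Search(query);                  // deprecated Searcher entry point
    System.Collections.IEnumerator it = hits.Iterator();
    while (it.MoveNext())
    {
        Hit hit = (Hit) it.Current;                      // each document is fetched lazily
        System.Console.WriteLine(hit.Get("title"));
    }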
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/HitQueue.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/HitQueue.cs
new file mode 100644 (file)
index 0000000..dc6234c
--- /dev/null
@@ -0,0 +1,94 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using PriorityQueue = Mono.Lucene.Net.Util.PriorityQueue;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       public sealed class HitQueue:PriorityQueue
+       {
+               
+               private bool prePopulate;
+               
+               /// <summary> Creates a new instance with <code>size</code> elements. If
+               /// <code>prePopulate</code> is set to true, the queue will pre-populate itself
+               /// with sentinel objects and set its {@link #Size()} to <code>size</code>. In
+               /// that case, you should not rely on {@link #Size()} to get the number of
+               /// actual elements that were added to the queue, but keep track yourself.<br/>
+               /// <b>NOTE:</b> in case <code>prePopulate</code> is true, you should pop
+               /// elements from the queue using the following code example:
+               /// 
+               /// <pre>
+               /// PriorityQueue pq = new HitQueue(10, true); // pre-populate.
+               /// ScoreDoc top = pq.top();
+               /// 
+               /// // Add/Update one element.
+               /// top.score = 1.0f;
+               /// top.doc = 0;
+               /// top = (ScoreDoc) pq.updateTop();
+               /// int totalHits = 1;
+               /// 
+               /// // Now pop only the elements that were *truly* inserted.
+               /// // First, pop all the sentinel elements (there are pq.size() - totalHits).
+               /// for (int i = pq.size() - totalHits; i &gt; 0; i--) pq.pop();
+               /// 
+               /// // Now pop the truly added elements.
+               /// ScoreDoc[] results = new ScoreDoc[totalHits];
+               /// for (int i = totalHits - 1; i &gt;= 0; i--) {
+               ///     results[i] = (ScoreDoc) pq.pop();
+               /// }
+               /// </pre>
+               /// 
+               /// <p/><b>NOTE</b>: This class pre-allocates a full array of
+               /// length <code>size</code>.
+               /// 
+               /// </summary>
+               /// <param name="size">the requested size of this queue.
+               /// </param>
+               /// <param name="prePopulate">specifies whether to pre-populate the queue with sentinel values.
+               /// </param>
+               /// <seealso cref="GetSentinelObject()">
+               /// </seealso>
+               public /*internal*/ HitQueue(int size, bool prePopulate)
+               {
+                       this.prePopulate = prePopulate;
+                       Initialize(size);
+               }
+               
+               // Returns null if prePopulate is false.
+               protected internal override System.Object GetSentinelObject()
+               {
+                       // Always set the doc Id to MAX_VALUE so that it won't be favored by
+                       // lessThan. This generally should not happen since if score is not NEG_INF,
+                       // TopScoreDocCollector will always add the object to the queue.
+                       return !prePopulate ? null : new ScoreDoc(System.Int32.MaxValue, System.Single.NegativeInfinity);
+               }
+               
+               public override bool LessThan(System.Object a, System.Object b)
+               {
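+                       // A tie on score breaks toward the larger doc id: it compares as "less",
+                       // surfaces at the top of this min-queue, and is evicted first, so
+                       // equal-score results end up ordered by ascending doc id.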
+                       ScoreDoc hitA = (ScoreDoc) a;
+                       ScoreDoc hitB = (ScoreDoc) b;
+                       if (hitA.score == hitB.score)
+                               return hitA.doc > hitB.doc;
+                       else
+                               return hitA.score < hitB.score;
+               }
+       }
+}
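A small sketch of the ordering LessThan establishes, assuming the PriorityQueue port keeps the Java original's Insert/Top methods: on equal scores the larger doc id compares as "less", sits at the top of the min-queue, and is evicted first, so equal-score results come out ordered by ascending doc id.

    HitQueue pq = new HitQueue(2, false);
    pq.Insert(new ScoreDoc(3, 1.0f));
    pq.Insert(new ScoreDoc(5, 1.0f));          // same score, larger doc id
    ScoreDoc weakest = (ScoreDoc) pq.Top();    // doc 5 surfaces first for eviction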
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Hits.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Hits.cs
new file mode 100644 (file)
index 0000000..1decfa6
--- /dev/null
@@ -0,0 +1,320 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Document = Mono.Lucene.Net.Documents.Document;
+using CorruptIndexException = Mono.Lucene.Net.Index.CorruptIndexException;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A ranked list of documents, used to hold search results.
+       /// <p/>
+       /// <b>Caution:</b> Iterate only over the hits needed. Iterating over all hits is
+       /// generally not desirable and may be the source of performance issues. If you
+       /// need to iterate over many or all hits, consider using the search method that
+       /// takes a {@link HitCollector}.
+       /// <p/>
+       /// <p/>
+       /// <b>Note:</b> Deleting matching documents concurrently with traversing the
+       /// hits may decrease {@link #Length()} when the deleted hits have not yet
+       /// been retrieved. In that case an exception is thrown when accessing a hit
+       /// <code>n</code> &gt; the current {@link #Length()} (but with <code>n</code>
+       /// &lt; {@link #Length()} at the start of the traversal).
+       /// 
+       /// </summary>
+       /// <deprecated> see {@link Searcher#Search(Query, int)},
+       /// {@link Searcher#Search(Query, Filter, int)} and
+       /// {@link Searcher#Search(Query, Filter, int, Sort)}:<br/>
+       /// 
+       /// <pre>
+       /// TopDocs topDocs = searcher.Search(query, numHits);
+       /// ScoreDoc[] hits = topDocs.ScoreDocs;
+       /// for (int i = 0; i &lt; hits.Length; i++) {
+       ///     int docId = hits[i].doc;
+       ///     Document d = searcher.Doc(docId);
+       ///     // do something with the current hit
+       /// }
+       /// </pre>
+       /// </deprecated>
+    [Obsolete("see Searcher.Search(Query, int), Searcher.Search(Query, Filter, int) and Searcher.Search(Query, Filter, int, Sort)")]
+       public sealed class Hits
+       {
+               private Weight weight;
+               private Searcher searcher;
+               private Filter filter = null;
+               private Sort sort = null;
+               
+               private int length; // the total number of hits
+               private System.Collections.ArrayList hitDocs = System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(10)); // cache of hits retrieved
+               
+               private HitDoc first; // head of LRU cache
+               private HitDoc last; // tail of LRU cache
+               private int numDocs = 0; // number cached
+               private int maxDocs = 200; // max to cache
+               
+               private int nDeletions; // # deleted docs in the index.    
+               private int lengthAtStart; // this is the number apps usually count on (although deletions can bring it down). 
+               private int nDeletedHits = 0; // # of already collected hits that were meanwhile deleted.
+               
+               public /*internal*/ bool debugCheckedForDeletions = false; // for test purposes.
+               
+               internal Hits(Searcher s, Query q, Filter f)
+               {
+                       weight = q.Weight(s);
+                       searcher = s;
+                       filter = f;
+                       nDeletions = CountDeletions(s);
+                       GetMoreDocs(50); // GetMoreDocs doubles its argument, so this fetches 100 initially
+                       lengthAtStart = length;
+               }
+               
+               internal Hits(Searcher s, Query q, Filter f, Sort o)
+               {
+                       weight = q.Weight(s);
+                       searcher = s;
+                       filter = f;
+                       sort = o;
+                       nDeletions = CountDeletions(s);
+                       GetMoreDocs(50); // GetMoreDocs doubles its argument, so this fetches 100 initially
+                       lengthAtStart = length;
+               }
+               
+               // count # deletions, return -1 if unknown.
+               private int CountDeletions(Searcher s)
+               {
+                       int cnt = - 1;
+                       if (s is IndexSearcher)
+                       {
+                               cnt = s.MaxDoc() - ((IndexSearcher) s).GetIndexReader().NumDocs();
+                       }
+                       return cnt;
+               }
+               
+               /// <summary> Tries to add new documents to hitDocs.
+               /// Ensures that the hit numbered <code>min</code> has been retrieved.
+               /// </summary>
+               private void  GetMoreDocs(int min)
+               {
+                       if (hitDocs.Count > min)
+                       {
+                               min = hitDocs.Count;
+                       }
+                       
+                       int n = min * 2; // double # retrieved
+                       TopDocs topDocs = (sort == null) ? searcher.Search(weight, filter, n) : searcher.Search(weight, filter, n, sort);
+                       
+                       length = topDocs.TotalHits;
+                       ScoreDoc[] scoreDocs = topDocs.ScoreDocs;
+                       
+                       float scoreNorm = 1.0f;
+                       
+                       if (length > 0 && topDocs.GetMaxScore() > 1.0f)
+                       {
+                               scoreNorm = 1.0f / topDocs.GetMaxScore();
+                       }
+                       
+                       int start = hitDocs.Count - nDeletedHits;
+                       
+                       // any new deletions?
+                       int nDels2 = CountDeletions(searcher);
+                       debugCheckedForDeletions = false;
+                       if (nDeletions < 0 || nDels2 > nDeletions)
+                       {
+                               // either we cannot count deletions, or some "previously valid hits" might have been deleted, so find exact start point
+                               nDeletedHits = 0;
+                               debugCheckedForDeletions = true;
+                               int i2 = 0;
+                               for (int i1 = 0; i1 < hitDocs.Count && i2 < scoreDocs.Length; i1++)
+                               {
+                                       int id1 = ((HitDoc) hitDocs[i1]).id;
+                                       int id2 = scoreDocs[i2].doc;
+                                       if (id1 == id2)
+                                       {
+                                               i2++;
+                                       }
+                                       else
+                                       {
+                                               nDeletedHits++;
+                                       }
+                               }
+                               start = i2;
+                       }
+                       
+                       int end = scoreDocs.Length < length ? scoreDocs.Length : length;
+                       length += nDeletedHits;
+                       for (int i = start; i < end; i++)
+                       {
+                               hitDocs.Add(new HitDoc(scoreDocs[i].score * scoreNorm, scoreDocs[i].doc));
+                       }
+                       
+                       nDeletions = nDels2;
+               }
+               
+               /// <summary>Returns the total number of hits available in this set. </summary>
+               public int Length()
+               {
+                       return length;
+               }
+               
+               /// <summary>Returns the stored fields of the n<sup>th</sup> document in this set.
+               /// <p/>Documents are cached, so that repeated requests for the same element may
+               /// return the same Document object.
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               public Document Doc(int n)
+               {
+                       HitDoc hitDoc = HitDoc(n);
+                       
+                       // Update LRU cache of documents
+                       Remove(hitDoc); // remove from list, if there
+                       AddToFront(hitDoc); // add to front of list
+                       if (numDocs > maxDocs)
+                       {
+                               // if cache is full
+                               HitDoc oldLast = last;
+                               Remove(last); // flush last
+                               oldLast.doc = null; // let doc get gc'd
+                       }
+                       
+                       if (hitDoc.doc == null)
+                       {
+                               hitDoc.doc = searcher.Doc(hitDoc.id); // cache miss: read document
+                       }
+                       
+                       return hitDoc.doc;
+               }
+               
+               /// <summary>Returns the score for the n<sup>th</sup> document in this set. </summary>
+               public float Score(int n)
+               {
+                       return HitDoc(n).score;
+               }
+               
+               /// <summary>Returns the id for the n<sup>th</sup> document in this set.
+               /// Note that ids may change when the index changes, so you cannot
+               /// rely on the id to be stable.
+               /// </summary>
+               public int Id(int n)
+               {
+                       return HitDoc(n).id;
+               }
+               
+               /// <summary> Returns a {@link HitIterator} to navigate the Hits.  Each item returned
+               /// from {@link Iterator#next()} is a {@link Hit}.
+               /// <p/>
+               /// <b>Caution:</b> Iterate only over the hits needed.  Iterating over all
+               /// hits is generally not desirable and may be the source of
+               /// performance issues. If you need to iterate over many or all hits, consider
+               /// using a search method that takes a {@link HitCollector}.
+               /// <p/>
+               /// </summary>
+               public System.Collections.IEnumerator Iterator()
+               {
+                       return new HitIterator(this);
+               }
+               
+               private HitDoc HitDoc(int n)
+               {
+                       if (n >= lengthAtStart)
+                       {
+                               throw new System.IndexOutOfRangeException("Not a valid hit number: " + n);
+                       }
+                       
+                       if (n >= hitDocs.Count)
+                       {
+                               GetMoreDocs(n);
+                       }
+                       
+                       if (n >= length)
+                       {
+                               // can only happen if concurrent deletions shrank the result set
+                               throw new System.Exception("Not a valid hit number: " + n);
+                       }
+                       
+                       return (HitDoc) hitDocs[n];
+               }
+               
+               private void  AddToFront(HitDoc hitDoc)
+               {
+                       // insert at front of cache
+                       if (first == null)
+                       {
+                               last = hitDoc;
+                       }
+                       else
+                       {
+                               first.prev = hitDoc;
+                       }
+                       
+                       hitDoc.next = first;
+                       first = hitDoc;
+                       hitDoc.prev = null;
+                       
+                       numDocs++;
+               }
+               
+               private void  Remove(HitDoc hitDoc)
+               {
+                       // remove from cache
+                       if (hitDoc.doc == null)
+                       {
+                               // it's not in the list
+                               return ; // abort
+                       }
+                       
+                       if (hitDoc.next == null)
+                       {
+                               last = hitDoc.prev;
+                       }
+                       else
+                       {
+                               hitDoc.next.prev = hitDoc.prev;
+                       }
+                       
+                       if (hitDoc.prev == null)
+                       {
+                               first = hitDoc.next;
+                       }
+                       else
+                       {
+                               hitDoc.prev.next = hitDoc.next;
+                       }
+                       
+                       numDocs--;
+               }
+       }
+       
+       sealed class HitDoc
+       {
+               internal float score;
+               internal int id;
+               internal Document doc = null;
+               
+               internal HitDoc next; // in doubly-linked cache
+               internal HitDoc prev; // in doubly-linked cache
+               
+               internal HitDoc(float s, int i)
+               {
+                       score = s;
+                       id = i;
+               }
+       }
+}
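A usage sketch for Hits mirroring the migration note in its class comment; `searcher` and `query` are assumed to exist:

    Hits hits = searcher.Search(query);        // deprecated entry point
    for (int i = 0; i < hits.Length(); i++)
    {
        Document d = hits.Doc(i);              // answered from the LRU cache after first access
        float score = hits.Score(i);           // normalized so no score exceeds 1.0
    }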
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/IndexSearcher.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/IndexSearcher.cs
new file mode 100644 (file)
index 0000000..06e261e
--- /dev/null
@@ -0,0 +1,404 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Document = Mono.Lucene.Net.Documents.Document;
+using FieldSelector = Mono.Lucene.Net.Documents.FieldSelector;
+using CorruptIndexException = Mono.Lucene.Net.Index.CorruptIndexException;
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using Directory = Mono.Lucene.Net.Store.Directory;
+using ReaderUtil = Mono.Lucene.Net.Util.ReaderUtil;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Implements search over a single IndexReader.
+       /// 
+       /// <p/>Applications usually need only call the inherited {@link #Search(Query)}
+       /// or {@link #Search(Query,Filter)} methods. For performance reasons it is 
+       /// recommended to open only one IndexSearcher and use it for all of your searches.
+       /// 
+       /// <p/>Note that you can only access Hits from an IndexSearcher as long as it is
+       /// not yet closed, otherwise an IOException will be thrown. 
+       /// 
+       /// <a name="thread-safety"></a><p/><b>NOTE</b>:
+       /// <code>IndexSearcher</code> instances are completely
+       /// thread safe, meaning multiple threads can call any of its
+       /// methods, concurrently.  If your application requires
+       /// external synchronization, you should <b>not</b>
+       /// synchronize on the <code>IndexSearcher</code> instance;
+       /// use your own (non-Lucene) objects instead.<p/>
+       /// </summary>
+    [Serializable]
+       public class IndexSearcher:Searcher
+       {
+               internal IndexReader reader;
+               private bool closeReader;
+               private IndexReader[] subReaders;
+               private int[] docStarts;
+               
+               /// <summary>Creates a searcher searching the index in the named directory.</summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <deprecated> Use {@link #IndexSearcher(Directory, boolean)} instead
+               /// </deprecated>
+        [Obsolete("Use IndexSearcher(Directory, bool) instead")]
+               public IndexSearcher(System.String path):this(IndexReader.Open(path), true)
+               {
+               }
+               
+               /// <summary>Creates a searcher searching the index in the named
+               /// directory.  You should pass readOnly=true, since it
+               /// gives much better concurrent performance, unless you
+               /// intend to do write operations (delete documents or
+               /// change norms) with the underlying IndexReader.
+               /// </summary>
+               /// <param name="path">directory where IndexReader will be opened
+               /// </param>
+               /// <param name="readOnly">if true, the underlying IndexReader
+               /// will be opened readOnly
+               /// </param>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <deprecated> Use {@link #IndexSearcher(Directory, boolean)} instead
+               /// </deprecated>
+        [Obsolete("Use IndexSearcher(Directory, bool) instead")]
+               public IndexSearcher(System.String path, bool readOnly):this(IndexReader.Open(path, readOnly), true)
+               {
+               }
+               
+               /// <summary>Creates a searcher searching the index in the provided directory.</summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <deprecated> Use {@link #IndexSearcher(Directory, boolean)} instead
+               /// </deprecated>
+        [Obsolete("Use IndexSearcher(Directory, bool) instead")]
+               public IndexSearcher(Directory directory):this(IndexReader.Open(directory), true)
+               {
+               }
+               
+               /// <summary>Creates a searcher searching the index in the named
+               /// directory.  You should pass readOnly=true, since it
+               /// gives much better concurrent performance, unless you
+               /// intend to do write operations (delete documents or
+               /// change norms) with the underlying IndexReader.
+               /// </summary>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <param name="path">directory where IndexReader will be opened
+               /// </param>
+               /// <param name="readOnly">if true, the underlying IndexReader
+               /// will be opened readOnly
+               /// </param>
+               public IndexSearcher(Directory path, bool readOnly):this(IndexReader.Open(path, readOnly), true)
+               {
+               }
+               
+               /// <summary>Creates a searcher searching the provided index. </summary>
+               public IndexSearcher(IndexReader r):this(r, false)
+               {
+               }
+               
+               private IndexSearcher(IndexReader r, bool closeReader)
+               {
+                       reader = r;
+                       this.closeReader = closeReader;
+                       
+                       System.Collections.IList subReadersList = new System.Collections.ArrayList();
+                       GatherSubReaders(subReadersList, reader);
+            subReaders = (IndexReader[])new System.Collections.ArrayList(subReadersList).ToArray(typeof(IndexReader));
+                       docStarts = new int[subReaders.Length];
+                       int maxDoc = 0;
+                       for (int i = 0; i < subReaders.Length; i++)
+                       {
+                               docStarts[i] = maxDoc;
+                               maxDoc += subReaders[i].MaxDoc();
+                       }
+               }
+               
+               protected internal virtual void  GatherSubReaders(System.Collections.IList allSubReaders, IndexReader r)
+               {
+                       ReaderUtil.GatherSubReaders(allSubReaders, r);
+               }
+               
+               /// <summary>Return the {@link IndexReader} this searches. </summary>
+               public virtual IndexReader GetIndexReader()
+               {
+                       return reader;
+               }
+               
+               /// <summary> Note that the underlying IndexReader is not closed if the
+               /// IndexSearcher was constructed with IndexSearcher(IndexReader r).
+               /// If the IndexReader was supplied implicitly by specifying a directory, then
+               /// the IndexReader gets closed.
+               /// </summary>
+               public override void  Close()
+               {
+                       if (closeReader)
+                               reader.Close();
+               }
+
+        /// <summary>
+        /// .NET-specific: implements disposal by delegating to Close().
+        /// </summary>
+        public override void Dispose()
+        {
+            Close();
+        }
+               
+               // inherit javadoc
+               public override int DocFreq(Term term)
+               {
+                       return reader.DocFreq(term);
+               }
+               
+               // inherit javadoc
+               public override Document Doc(int i)
+               {
+                       return reader.Document(i);
+               }
+               
+               // inherit javadoc
+               public override Document Doc(int i, FieldSelector fieldSelector)
+               {
+                       return reader.Document(i, fieldSelector);
+               }
+               
+               // inherit javadoc
+               public override int MaxDoc()
+               {
+                       return reader.MaxDoc();
+               }
+               
+               // inherit javadoc
+               public override TopDocs Search(Weight weight, Filter filter, int nDocs)
+               {
+                       
+                       if (nDocs <= 0)
+                       {
+                               throw new System.ArgumentException("nDocs must be > 0");
+                       }
+            nDocs = System.Math.Min(nDocs, reader.MaxDoc());
+
+                       TopScoreDocCollector collector = TopScoreDocCollector.create(nDocs, !weight.ScoresDocsOutOfOrder());
+                       Search(weight, filter, collector);
+                       return collector.TopDocs();
+               }
+               
+               public override TopFieldDocs Search(Weight weight, Filter filter, int nDocs, Sort sort)
+               {
+                       return Search(weight, filter, nDocs, sort, true);
+               }
+               
+               /// <summary> Just like {@link #Search(Weight, Filter, int, Sort)}, but you choose
+               /// whether or not the fields in the returned {@link FieldDoc} instances
+               /// should be set by specifying fillFields.<br/>
+               /// 
+               /// <p/>
+               /// NOTE: this does not compute scores by default. If you need scores, create
+               /// a {@link TopFieldCollector} instance by calling
+               /// {@link TopFieldCollector#create} and then pass that to
+               /// {@link #Search(Weight, Filter, Collector)}.
+               /// <p/>
+               /// </summary>
+               public virtual TopFieldDocs Search(Weight weight, Filter filter, int nDocs, Sort sort, bool fillFields)
+               {
+            nDocs = System.Math.Min(nDocs, reader.MaxDoc());
+
+                       SortField[] fields = sort.fields;
+                       bool legacy = false;
+                       for (int i = 0; i < fields.Length; i++)
+                       {
+                               SortField field = fields[i];
+                               System.String fieldname = field.GetField();
+                               int type = field.GetType();
+                               // Resolve AUTO into its true type
+                               if (type == SortField.AUTO)
+                               {
+                                       int autotype = SortField.DetectFieldType(reader, fieldname);
+                                       if (autotype == SortField.STRING)
+                                       {
+                                               fields[i] = new SortField(fieldname, field.GetLocale(), field.GetReverse());
+                                       }
+                                       else
+                                       {
+                                               fields[i] = new SortField(fieldname, autotype, field.GetReverse());
+                                       }
+                               }
+                               
+                               if (field.GetUseLegacySearch())
+                               {
+                                       legacy = true;
+                               }
+                       }
+                       
+                       if (legacy)
+                       {
+                               // Search the single top-level reader
+                               TopDocCollector collector = new TopFieldDocCollector(reader, sort, nDocs);
+                               HitCollectorWrapper hcw = new HitCollectorWrapper(collector);
+                               hcw.SetNextReader(reader, 0);
+                               if (filter == null)
+                               {
+                                       Scorer scorer = weight.Scorer(reader, true, true);
+                                       if (scorer != null)
+                                       {
+                                               scorer.Score(hcw);
+                                       }
+                               }
+                               else
+                               {
+                                       SearchWithFilter(reader, weight, filter, hcw);
+                               }
+                               return (TopFieldDocs) collector.TopDocs();
+                       }
+                       
+                       TopFieldCollector collector2 = TopFieldCollector.create(sort, nDocs, fillFields, fieldSortDoTrackScores, fieldSortDoMaxScore, !weight.ScoresDocsOutOfOrder());
+                       Search(weight, filter, collector2);
+                       return (TopFieldDocs) collector2.TopDocs();
+               }
+               
+               public override void  Search(Weight weight, Filter filter, Collector collector)
+               {
+                       
+                       if (filter == null)
+                       {
+                               for (int i = 0; i < subReaders.Length; i++)
+                               {
+                                       // search each subreader
+                                       collector.SetNextReader(subReaders[i], docStarts[i]);
+                                       Scorer scorer = weight.Scorer(subReaders[i], !collector.AcceptsDocsOutOfOrder(), true);
+                                       if (scorer != null)
+                                       {
+                                               scorer.Score(collector);
+                                       }
+                               }
+                       }
+                       else
+                       {
+                               for (int i = 0; i < subReaders.Length; i++)
+                               {
+                                       // search each subreader
+                                       collector.SetNextReader(subReaders[i], docStarts[i]);
+                                       SearchWithFilter(subReaders[i], weight, filter, collector);
+                               }
+                       }
+               }
+               
+               private void  SearchWithFilter(IndexReader reader, Weight weight, Filter filter, Collector collector)
+               {
+                       
+                       System.Diagnostics.Debug.Assert(filter != null);
+                       
+                       Scorer scorer = weight.Scorer(reader, true, false);
+                       if (scorer == null)
+                       {
+                               return ;
+                       }
+                       
+                       int docID = scorer.DocID();
+                       System.Diagnostics.Debug.Assert(docID == - 1 || docID == DocIdSetIterator.NO_MORE_DOCS);
+                       
+                       // CHECKME: use ConjunctionScorer here?
+                       DocIdSet filterDocIdSet = filter.GetDocIdSet(reader);
+                       if (filterDocIdSet == null)
+                       {
+                               // this means the filter does not accept any documents.
+                               return ;
+                       }
+                       
+                       DocIdSetIterator filterIter = filterDocIdSet.Iterator();
+                       if (filterIter == null)
+                       {
+                               // this means the filter does not accept any documents.
+                               return ;
+                       }
+                       int filterDoc = filterIter.NextDoc();
+                       int scorerDoc = scorer.Advance(filterDoc);
+                       
+                       collector.SetScorer(scorer);
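+                       // Leapfrog join: advance whichever of scorer/filter is behind until both
+                       // land on the same document, collect it, then continue.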
+                       while (true)
+                       {
+                               if (scorerDoc == filterDoc)
+                               {
+                                       // Check if scorer has exhausted, only before collecting.
+                                       if (scorerDoc == DocIdSetIterator.NO_MORE_DOCS)
+                                       {
+                                               break;
+                                       }
+                                       collector.Collect(scorerDoc);
+                                       filterDoc = filterIter.NextDoc();
+                                       scorerDoc = scorer.Advance(filterDoc);
+                               }
+                               else if (scorerDoc > filterDoc)
+                               {
+                                       filterDoc = filterIter.Advance(scorerDoc);
+                               }
+                               else
+                               {
+                                       scorerDoc = scorer.Advance(filterDoc);
+                               }
+                       }
+               }
+               
+               public override Query Rewrite(Query original)
+               {
+                       Query query = original;
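+                       // Iterate to a fixed point: stop once Rewrite returns the query unchanged.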
+                       for (Query rewrittenQuery = query.Rewrite(reader); rewrittenQuery != query; rewrittenQuery = query.Rewrite(reader))
+                       {
+                               query = rewrittenQuery;
+                       }
+                       return query;
+               }
+               
+               public override Explanation Explain(Weight weight, int doc)
+               {
+                       int n = ReaderUtil.SubIndex(doc, docStarts);
+                       int deBasedDoc = doc - docStarts[n];
+                       
+                       return weight.Explain(subReaders[n], deBasedDoc);
+               }
+               
+               private bool fieldSortDoTrackScores;
+               private bool fieldSortDoMaxScore;
+               
+               /// <summary> By default, no scores are computed when sorting by field (using
+               /// {@link #Search(Query,Filter,int,Sort)}). You can change that, per
+               /// IndexSearcher instance, by calling this method. Note that this will incur
+               /// a CPU cost.
+               /// 
+               /// </summary>
+               /// <param name="doTrackScores">If true, then scores are returned for every matching document
+               /// in {@link TopFieldDocs}.
+               /// 
+               /// </param>
+               /// <param name="doMaxScore">If true, then the max score for all matching docs is computed.
+               /// </param>
+               public virtual void  SetDefaultFieldSortScoring(bool doTrackScores, bool doMaxScore)
+               {
+                       fieldSortDoTrackScores = doTrackScores;
+                       fieldSortDoMaxScore = doMaxScore;
+               }
+
+        public IndexReader reader_ForNUnit
+        {
+            get { return reader; }
+        }
+       }
+}
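An end-to-end sketch for IndexSearcher using the non-deprecated constructor; the index path, field name, and the FSDirectory.Open overload are assumptions carried over from the upstream 2.9 API:

    Directory dir = Mono.Lucene.Net.Store.FSDirectory.Open(new System.IO.DirectoryInfo("index"));
    IndexSearcher searcher = new IndexSearcher(dir, true);   // readOnly gives better concurrency
    try
    {
        TopDocs top = searcher.Search(new TermQuery(new Term("body", "lucene")), 10);
        foreach (ScoreDoc sd in top.ScoreDocs)
        {
            Document d = searcher.Doc(sd.doc);
        }
    }
    finally
    {
        searcher.Close();   // closes the reader too, since it was opened from a Directory
    }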
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/MatchAllDocsQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/MatchAllDocsQuery.cs
new file mode 100644 (file)
index 0000000..fe2ae4a
--- /dev/null
@@ -0,0 +1,227 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using TermDocs = Mono.Lucene.Net.Index.TermDocs;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A query that matches all documents.
+       /// 
+       /// </summary>
+       [Serializable]
+       public class MatchAllDocsQuery:Query
+       {
+               
+               public MatchAllDocsQuery():this(null)
+               {
+               }
+               
+               private System.String normsField;
+               
+               /// <param name="normsField">Field used for normalization factor (document boost). Null if none.
+               /// </param>
+               public MatchAllDocsQuery(System.String normsField)
+               {
+                       this.normsField = normsField;
+               }
+               
+               private class MatchAllScorer:Scorer
+               {
+                       private void  InitBlock(MatchAllDocsQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private MatchAllDocsQuery enclosingInstance;
+                       public MatchAllDocsQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal TermDocs termDocs;
+                       internal float score;
+                       internal byte[] norms;
+                       private int doc = - 1;
+                       
+                       internal MatchAllScorer(MatchAllDocsQuery enclosingInstance, IndexReader reader, Similarity similarity, Weight w, byte[] norms):base(similarity)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.termDocs = reader.TermDocs(null);
+                               score = w.GetValue();
+                               this.norms = norms;
+                       }
+                       
+                       public override Explanation Explain(int doc)
+                       {
+                               return null; // not called... see MatchAllDocsWeight.explain()
+                       }
+                       
+                       /// <deprecated> use {@link #DocID()} instead. 
+                       /// </deprecated>
+            [Obsolete("use DocID() instead.")]
+                       public override int Doc()
+                       {
+                               return termDocs.Doc();
+                       }
+                       
+                       public override int DocID()
+                       {
+                               return doc;
+                       }
+                       
+                       /// <deprecated> use {@link #NextDoc()} instead. 
+                       /// </deprecated>
+            [Obsolete("use NextDoc() instead. ")]
+                       public override bool Next()
+                       {
+                               return NextDoc() != NO_MORE_DOCS;
+                       }
+                       
+                       public override int NextDoc()
+                       {
+                               return doc = termDocs.Next() ? termDocs.Doc() : NO_MORE_DOCS;
+                       }
+                       
+                       public override float Score()
+                       {
+                               return norms == null ? score : score * Similarity.DecodeNorm(norms[DocID()]);
+                       }
+                       
+                       /// <deprecated> use {@link #Advance(int)} instead. 
+                       /// </deprecated>
+            [Obsolete("use Advance(int) instead.")]
+                       public override bool SkipTo(int target)
+                       {
+                               return Advance(target) != NO_MORE_DOCS;
+                       }
+                       
+                       public override int Advance(int target)
+                       {
+                               return doc = termDocs.SkipTo(target) ? termDocs.Doc() : NO_MORE_DOCS;
+                       }
+               }
+               
+               [Serializable]
+               private class MatchAllDocsWeight:Weight
+               {
+                       private void  InitBlock(MatchAllDocsQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private MatchAllDocsQuery enclosingInstance;
+                       public MatchAllDocsQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private Similarity similarity;
+                       private float queryWeight;
+                       private float queryNorm;
+                       
+                       public MatchAllDocsWeight(MatchAllDocsQuery enclosingInstance, Searcher searcher)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.similarity = searcher.GetSimilarity();
+                       }
+                       
+                       public override System.String ToString()
+                       {
+                               return "weight(" + Enclosing_Instance + ")";
+                       }
+                       
+                       public override Query GetQuery()
+                       {
+                               return Enclosing_Instance;
+                       }
+                       
+                       public override float GetValue()
+                       {
+                               return queryWeight;
+                       }
+                       
+                       public override float SumOfSquaredWeights()
+                       {
+                               queryWeight = Enclosing_Instance.GetBoost();
+                               return queryWeight * queryWeight;
+                       }
+                       
+                       public override void  Normalize(float queryNorm)
+                       {
+                               this.queryNorm = queryNorm;
+                               queryWeight *= this.queryNorm;
+                       }
+                       
+                       public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
+                       {
+                               return new MatchAllScorer(enclosingInstance, reader, similarity, this, Enclosing_Instance.normsField != null ? reader.Norms(Enclosing_Instance.normsField) : null);
+                       }
+                       
+                       public override Explanation Explain(IndexReader reader, int doc)
+                       {
+                               // explain query weight
+                               Explanation queryExpl = new ComplexExplanation(true, GetValue(), "MatchAllDocsQuery, product of:");
+                               if (Enclosing_Instance.GetBoost() != 1.0f)
+                               {
+                                       queryExpl.AddDetail(new Explanation(Enclosing_Instance.GetBoost(), "boost"));
+                               }
+                               queryExpl.AddDetail(new Explanation(queryNorm, "queryNorm"));
+                               
+                               return queryExpl;
+                       }
+               }
+               
+               public override Weight CreateWeight(Searcher searcher)
+               {
+                       return new MatchAllDocsWeight(this, searcher);
+               }
+               
+               public override void  ExtractTerms(System.Collections.Hashtable terms)
+               {
+               }
+               
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       buffer.Append("*:*");
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       return buffer.ToString();
+               }
+               
+               public  override bool Equals(System.Object o)
+               {
+                       if (!(o is MatchAllDocsQuery))
+                               return false;
+                       MatchAllDocsQuery other = (MatchAllDocsQuery) o;
+                       return this.GetBoost() == other.GetBoost();
+               }
+               
+               public override int GetHashCode()
+               {
+                       return BitConverter.ToInt32(BitConverter.GetBytes(GetBoost()), 0) ^ 0x1AA71190;
+               }
+       }
+}
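A sketch of the usual pairing of MatchAllDocsQuery with a filter; the filter and `searcher` are illustrative, and scores are uniform unless a norms field was supplied:

    Query all = new MatchAllDocsQuery();
    Filter onlyRecent = new QueryWrapperFilter(new TermQuery(new Term("year", "2012")));
    TopDocs top = searcher.Search(all, onlyRecent, 100);   // every document passing the filter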
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/MultiPhraseQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/MultiPhraseQuery.cs
new file mode 100644 (file)
index 0000000..0227308
--- /dev/null
@@ -0,0 +1,503 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using MultipleTermPositions = Mono.Lucene.Net.Index.MultipleTermPositions;
+using Term = Mono.Lucene.Net.Index.Term;
+using TermPositions = Mono.Lucene.Net.Index.TermPositions;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> MultiPhraseQuery is a generalized version of PhraseQuery, with an added
+       /// method {@link #Add(Term[])}.
+       /// To use this class to search for the phrase "Microsoft app*", first call
+       /// Add(Term) with the term "Microsoft", then find all terms that have "app"
+       /// as a prefix using IndexReader.Terms(Term), and use Add(Term[] terms) to
+       /// add them to the query.
+       /// 
+       /// </summary>
+       /// <version>  1.0
+       /// </version>
+       [Serializable]
+       public class MultiPhraseQuery:Query
+       {
+               private System.String field;
+               private System.Collections.ArrayList termArrays = new System.Collections.ArrayList();
+               private System.Collections.ArrayList positions = new System.Collections.ArrayList();
+               
+               private int slop = 0;
+               
+               /// <summary>Sets the phrase slop for this query.</summary>
+               /// <seealso cref="PhraseQuery.SetSlop(int)">
+               /// </seealso>
+               public virtual void  SetSlop(int s)
+               {
+                       slop = s;
+               }
+               
+               /// <summary>Sets the phrase slop for this query.</summary>
+               /// <seealso cref="PhraseQuery.GetSlop()">
+               /// </seealso>
+               public virtual int GetSlop()
+               {
+                       return slop;
+               }
+               
+               /// <summary>Add a single term at the next position in the phrase.</summary>
+               /// <seealso cref="PhraseQuery.add(Term)">
+               /// </seealso>
+               public virtual void  Add(Term term)
+               {
+                       Add(new Term[]{term});
+               }
+               
+               /// <summary>Add multiple terms at the next position in the phrase.  Any of the terms
+               /// may match.
+               /// 
+               /// </summary>
+               /// <seealso cref="PhraseQuery.add(Term)">
+               /// </seealso>
+               public virtual void  Add(Term[] terms)
+               {
+                       int position = 0;
+                       if (positions.Count > 0)
+                               position = ((System.Int32) positions[positions.Count - 1]) + 1;
+                       
+                       Add(terms, position);
+               }
+               
+               /// <summary> Allows specifying the relative position of terms within the phrase.
+               /// 
+               /// </summary>
+               /// <seealso cref="PhraseQuery.Add(Term, int)">
+               /// </seealso>
+               /// <param name="terms">
+               /// </param>
+               /// <param name="position">
+               /// </param>
+               public virtual void  Add(Term[] terms, int position)
+               {
+                       if (termArrays.Count == 0)
+                               field = terms[0].Field();
+                       
+                       for (int i = 0; i < terms.Length; i++)
+                       {
+                               if ((System.Object) terms[i].Field() != (System.Object) field)
+                               {
+                                       throw new System.ArgumentException("All phrase terms must be in the same field (" + field + "): " + terms[i]);
+                               }
+                       }
+                       
+                       termArrays.Add(terms);
+                       positions.Add((System.Int32) position);
+               }
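+               
+               // A sketch of the prefix-expansion pattern from the class comment
+               // ("Microsoft app*"); `reader` and the "body" field are assumptions:
+               //
+               //   MultiPhraseQuery q = new MultiPhraseQuery();
+               //   q.Add(new Term("body", "microsoft"));
+               //   System.Collections.ArrayList expanded = new System.Collections.ArrayList();
+               //   TermEnum te = reader.Terms(new Term("body", "app"));
+               //   do {
+               //       Term t = te.Term();
+               //       if (t == null || t.Field() != "body" || !t.Text().StartsWith("app"))
+               //           break;
+               //       expanded.Add(t);
+               //   } while (te.Next());
+               //   q.Add((Term[]) expanded.ToArray(typeof(Term)));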
+
+        /// <summary> Returns a List&lt;Term[]&gt; of the terms in the multiphrase.
+               /// Do not modify the List or its contents.
+               /// </summary>
+               public virtual System.Collections.IList GetTermArrays()
+               {
+                       return (System.Collections.IList) System.Collections.ArrayList.ReadOnly(new System.Collections.ArrayList(termArrays));
+               }
+               
+               /// <summary> Returns the relative positions of terms in this phrase.</summary>
+               public virtual int[] GetPositions()
+               {
+                       int[] result = new int[positions.Count];
+                       for (int i = 0; i < positions.Count; i++)
+                               result[i] = ((System.Int32) positions[i]);
+                       return result;
+               }
+               
+               // inherit javadoc
+               public override void  ExtractTerms(System.Collections.Hashtable terms)
+               {
+                       for (System.Collections.IEnumerator iter = termArrays.GetEnumerator(); iter.MoveNext(); )
+                       {
+                               Term[] arr = (Term[]) iter.Current;
+                               for (int i = 0; i < arr.Length; i++)
+                               {
+                                       SupportClass.CollectionsHelper.AddIfNotContains(terms, arr[i]);
+                               }
+                       }
+               }
+               
+               
+               [Serializable]
+               private class MultiPhraseWeight:Weight
+               {
+                       private void  InitBlock(MultiPhraseQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private MultiPhraseQuery enclosingInstance;
+                       public MultiPhraseQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private Similarity similarity;
+                       private float value_Renamed;
+                       private float idf;
+                       private float queryNorm;
+                       private float queryWeight;
+                       
+                       public MultiPhraseWeight(MultiPhraseQuery enclosingInstance, Searcher searcher)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.similarity = Enclosing_Instance.GetSimilarity(searcher);
+                               
+                               // compute idf
+                               System.Collections.IEnumerator i = Enclosing_Instance.termArrays.GetEnumerator();
+                               while (i.MoveNext())
+                               {
+                                       Term[] terms = (Term[]) i.Current;
+                                       for (int j = 0; j < terms.Length; j++)
+                                       {
+                                               idf += Enclosing_Instance.GetSimilarity(searcher).Idf(terms[j], searcher);
+                                       }
+                               }
+                       }
+                       
+                       public override Query GetQuery()
+                       {
+                               return Enclosing_Instance;
+                       }
+                       public override float GetValue()
+                       {
+                               return value_Renamed;
+                       }
+                       
+                       public override float SumOfSquaredWeights()
+                       {
+                               queryWeight = idf * Enclosing_Instance.GetBoost(); // compute query weight
+                               return queryWeight * queryWeight; // square it
+                       }
+                       
+                       public override void  Normalize(float queryNorm)
+                       {
+                               this.queryNorm = queryNorm;
+                               queryWeight *= queryNorm; // normalize query weight
+                               value_Renamed = queryWeight * idf; // idf for document 
+                       }
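+
+                       // Worked example (illustrative): under the classic Lucene scoring
+                       // model the searcher computes queryNorm = 1/sqrt(SumOfSquaredWeights()).
+                       // For a lone MultiPhraseQuery with boost b this gives
+                       //   queryNorm = 1/(idf * b)
+                       // and, after Normalize(), the final factor is
+                       //   value = queryWeight * idf = (idf * b * queryNorm) * idf
+                       // so the phrase idf contributes once on the query side and once
+                       // for the matching document.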
+                       
+                       public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
+                       {
+                               // optimize zero-term case
+                               if (Enclosing_Instance.termArrays.Count == 0)
+                                       return null;
+                               
+                               TermPositions[] tps = new TermPositions[Enclosing_Instance.termArrays.Count];
+                               for (int i = 0; i < tps.Length; i++)
+                               {
+                                       Term[] terms = (Term[]) Enclosing_Instance.termArrays[i];
+                                       
+                                       TermPositions p;
+                                       if (terms.Length > 1)
+                                               p = new MultipleTermPositions(reader, terms);
+                                       else
+                                               p = reader.TermPositions(terms[0]);
+                                       
+                                       if (p == null)
+                                               return null;
+                                       
+                                       tps[i] = p;
+                               }
+                               
+                               if (Enclosing_Instance.slop == 0)
+                                       return new ExactPhraseScorer(this, tps, Enclosing_Instance.GetPositions(), similarity, reader.Norms(Enclosing_Instance.field));
+                               else
+                                       return new SloppyPhraseScorer(this, tps, Enclosing_Instance.GetPositions(), similarity, Enclosing_Instance.slop, reader.Norms(Enclosing_Instance.field));
+                       }
+                       
+                       public override Explanation Explain(IndexReader reader, int doc)
+                       {
+                               ComplexExplanation result = new ComplexExplanation();
+                               result.SetDescription("weight(" + GetQuery() + " in " + doc + "), product of:");
+                               
+                               Explanation idfExpl = new Explanation(idf, "idf(" + GetQuery() + ")");
+                               
+                               // explain query weight
+                               Explanation queryExpl = new Explanation();
+                               queryExpl.SetDescription("queryWeight(" + GetQuery() + "), product of:");
+                               
+                               Explanation boostExpl = new Explanation(Enclosing_Instance.GetBoost(), "boost");
+                               if (Enclosing_Instance.GetBoost() != 1.0f)
+                                       queryExpl.AddDetail(boostExpl);
+                               
+                               queryExpl.AddDetail(idfExpl);
+                               
+                               Explanation queryNormExpl = new Explanation(queryNorm, "queryNorm");
+                               queryExpl.AddDetail(queryNormExpl);
+                               
+                               queryExpl.SetValue(boostExpl.GetValue() * idfExpl.GetValue() * queryNormExpl.GetValue());
+                               
+                               result.AddDetail(queryExpl);
+                               
+                               // explain field weight
+                               ComplexExplanation fieldExpl = new ComplexExplanation();
+                               fieldExpl.SetDescription("fieldWeight(" + GetQuery() + " in " + doc + "), product of:");
+                               
+                               Scorer scorer = Scorer(reader, true, false);
+                               if (scorer == null)
+                               {
+                                       return new Explanation(0.0f, "no matching docs");
+                               }
+                               Explanation tfExpl = scorer.Explain(doc);
+                               fieldExpl.AddDetail(tfExpl);
+                               fieldExpl.AddDetail(idfExpl);
+                               
+                               Explanation fieldNormExpl = new Explanation();
+                               byte[] fieldNorms = reader.Norms(Enclosing_Instance.field);
+                               float fieldNorm = fieldNorms != null?Similarity.DecodeNorm(fieldNorms[doc]):1.0f;
+                               fieldNormExpl.SetValue(fieldNorm);
+                               fieldNormExpl.SetDescription("fieldNorm(field=" + Enclosing_Instance.field + ", doc=" + doc + ")");
+                               fieldExpl.AddDetail(fieldNormExpl);
+                               
+                               fieldExpl.SetMatch(tfExpl.IsMatch());
+                               fieldExpl.SetValue(tfExpl.GetValue() * idfExpl.GetValue() * fieldNormExpl.GetValue());
+                               
+                               result.AddDetail(fieldExpl);
+                               System.Boolean? tempAux = fieldExpl.GetMatch();
+                               result.SetMatch(tempAux);
+                               
+                               // combine them
+                               result.SetValue(queryExpl.GetValue() * fieldExpl.GetValue());
+                               
+                               if (queryExpl.GetValue() == 1.0f)
+                                       return fieldExpl;
+                               
+                               return result;
+                       }
+               }
+               
+               public override Query Rewrite(IndexReader reader)
+               {
+                       if (termArrays.Count == 1)
+                       {
+                               // optimize one-term case
+                               Term[] terms = (Term[]) termArrays[0];
+                               BooleanQuery boq = new BooleanQuery(true);
+                               for (int i = 0; i < terms.Length; i++)
+                               {
+                                       boq.Add(new TermQuery(terms[i]), BooleanClause.Occur.SHOULD);
+                               }
+                               boq.SetBoost(GetBoost());
+                               return boq;
+                       }
+                       else
+                       {
+                               return this;
+                       }
+               }
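+
+               // Rewrite example (illustrative, hypothetical field name "f"): a query
+               // holding the single position { f:developer, f:development } rewrites
+               // to the plain disjunction
+               //   f:developer f:development
+               // i.e. a BooleanQuery of SHOULD-clause TermQuerys, which avoids the
+               // positional phrase machinery entirely.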
+               
+               public override Weight CreateWeight(Searcher searcher)
+               {
+                       return new MultiPhraseWeight(this, searcher);
+               }
+               
+               /// <summary>Prints a user-readable version of this query. </summary>
+               public override System.String ToString(System.String f)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       if (!field.Equals(f))
+                       {
+                               buffer.Append(field);
+                               buffer.Append(":");
+                       }
+                       
+                       buffer.Append("\"");
+                       System.Collections.IEnumerator i = termArrays.GetEnumerator();
+            bool first = true;
+                       while (i.MoveNext())
+                       {
+                if (first)
+                {
+                    first = false;
+                }
+                else
+                {
+                    buffer.Append(" ");
+                }
+
+                               Term[] terms = (Term[]) i.Current;
+                               if (terms.Length > 1)
+                               {
+                                       buffer.Append("(");
+                                       for (int j = 0; j < terms.Length; j++)
+                                       {
+                                               buffer.Append(terms[j].Text());
+                                               if (j < terms.Length - 1)
+                                                       buffer.Append(" ");
+                                       }
+                                       buffer.Append(")");
+                               }
+                               else
+                               {
+                                       buffer.Append(terms[0].Text());
+                               }
+                       }
+                       buffer.Append("\"");
+                       
+                       if (slop != 0)
+                       {
+                               buffer.Append("~");
+                               buffer.Append(slop);
+                       }
+                       
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       
+                       return buffer.ToString();
+               }
+               
+               
+               /// <summary>Returns true if <code>o</code> is equal to this. </summary>
+               public  override bool Equals(System.Object o)
+               {
+                       if (!(o is MultiPhraseQuery))
+                               return false;
+                       MultiPhraseQuery other = (MultiPhraseQuery) o;
+            if (this.GetBoost() != other.GetBoost() || this.slop != other.slop)
+            {
+                return false;
+            }
+            if (this.termArrays.Count != other.termArrays.Count)
+            {
+                return false;
+            }
+            for (int i = 0; i < this.termArrays.Count; i++)
+            {
+                if (!SupportClass.Compare.CompareTermArrays((Term[])this.termArrays[i], (Term[])other.termArrays[i]))
+                {
+                    return false;
+                }
+            }
+            if (this.positions.Count != other.positions.Count)
+            {
+                return false;
+            }
+            for (int i = 0; i < this.positions.Count; i++)
+            {
+                if ((int)this.positions[i] != (int)other.positions[i])
+                {
+                    return false;
+                }
+            }
+            return true;
+        }
+               
+               /// <summary>Returns a hash code value for this object.</summary>
+               public override int GetHashCode()
+               {
+            int posHash = 0;
+            foreach(int pos in positions)
+            {
+                posHash += pos.GetHashCode();
+            }
+                       return BitConverter.ToInt32(BitConverter.GetBytes(GetBoost()), 0) ^ slop ^ TermArraysHashCode() ^ posHash ^ 0x4AC65113;
+               }
+               
+               // Breakout calculation of the termArrays hashcode
+               private int TermArraysHashCode()
+               {
+                       int hashCode = 1;
+                       System.Collections.IEnumerator iterator = termArrays.GetEnumerator();
+                       while (iterator.MoveNext())
+                       {
+                               Term[] termArray = (Term[]) iterator.Current;
+                               hashCode = 31 * hashCode + (termArray == null?0:ArraysHashCode(termArray));
+                       }
+                       return hashCode;
+               }
+               
+               private int ArraysHashCode(Term[] termArray)
+               {
+                       if (termArray == null)
+                               return 0;
+                       
+                       int result = 1;
+                       
+                       for (int i = 0; i < termArray.Length; i++)
+                       {
+                               Term term = termArray[i];
+                               result = 31 * result + (term == null?0:term.GetHashCode());
+                       }
+                       
+                       return result;
+               }
+               
+               // Breakout calculation of the termArrays equals
+               private bool TermArraysEquals(System.Collections.IList termArrays1, System.Collections.IList termArrays2)
+               {
+                       if (termArrays1.Count != termArrays2.Count)
+                       {
+                               return false;
+                       }
+                       System.Collections.IEnumerator iterator1 = termArrays1.GetEnumerator();
+                       System.Collections.IEnumerator iterator2 = termArrays2.GetEnumerator();
+                       while (iterator1.MoveNext())
+                       {
+                               Term[] termArray1 = (Term[]) iterator1.Current;
+                               Term[] termArray2 = (Term[]) iterator2.Current;
+                               if (!(termArray1 == null ? termArray2 == null : TermEquals(termArray1, termArray2)))
+                               {
+                                       return false;
+                               }
+                       }
+                       return true;
+               }
+
+        public static bool TermEquals(System.Array array1, System.Array array2)
+        {
+            bool result = false;
+            if ((array1 == null) && (array2 == null))
+                result = true;
+            else if ((array1 != null) && (array2 != null))
+            {
+                if (array1.Length == array2.Length)
+                {
+                    int length = array1.Length;
+                    result = true;
+                    for (int index = 0; index < length; index++)
+                    {
+                        if (!(array1.GetValue(index).Equals(array2.GetValue(index))))
+                        {
+                            result = false;
+                            break;
+                        }
+                    }
+                }
+            }
+            return result;
+        }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/MultiSearcher.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/MultiSearcher.cs
new file mode 100644 (file)
index 0000000..ebacad9
--- /dev/null
@@ -0,0 +1,429 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Document = Mono.Lucene.Net.Documents.Document;
+using FieldSelector = Mono.Lucene.Net.Documents.FieldSelector;
+using CorruptIndexException = Mono.Lucene.Net.Index.CorruptIndexException;
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using ReaderUtil = Mono.Lucene.Net.Util.ReaderUtil;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Implements search over a set of <code>Searchables</code>.
+       /// 
+       /// <p/>Applications usually need only call the inherited {@link #Search(Query)}
+       /// or {@link #Search(Query,Filter)} methods.
+       /// </summary>
+       public class MultiSearcher:Searcher
+       {
+               private class AnonymousClassCollector:Collector
+               {
+                       public AnonymousClassCollector(Mono.Lucene.Net.Search.Collector collector, int start, MultiSearcher enclosingInstance)
+                       {
+                               InitBlock(collector, start, enclosingInstance);
+                       }
+                       private void  InitBlock(Mono.Lucene.Net.Search.Collector collector, int start, MultiSearcher enclosingInstance)
+                       {
+                               this.collector = collector;
+                               this.start = start;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private Mono.Lucene.Net.Search.Collector collector;
+                       private int start;
+                       private MultiSearcher enclosingInstance;
+                       public MultiSearcher Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       public override void  SetScorer(Scorer scorer)
+                       {
+                               collector.SetScorer(scorer);
+                       }
+                       public override void  Collect(int doc)
+                       {
+                               collector.Collect(doc);
+                       }
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               collector.SetNextReader(reader, start + docBase);
+                       }
+                       public override bool AcceptsDocsOutOfOrder()
+                       {
+                               return collector.AcceptsDocsOutOfOrder();
+                       }
+               }
+               
+               /// <summary> Document Frequency cache acting as a Dummy-Searcher. This class is not a
+               /// full-fledged Searcher; it only supports the methods necessary to
+               /// initialize Weights.
+               /// </summary>
+               private class CachedDfSource:Searcher
+               {
+                       private System.Collections.IDictionary dfMap; // Map from Terms to corresponding doc freqs
+                       private int maxDoc; // document count
+                       
+                       public CachedDfSource(System.Collections.IDictionary dfMap, int maxDoc, Similarity similarity)
+                       {
+                               this.dfMap = dfMap;
+                               this.maxDoc = maxDoc;
+                               SetSimilarity(similarity);
+                       }
+                       
+                       public override int DocFreq(Term term)
+                       {
+                               int df;
+                               try
+                               {
+                                       df = ((System.Int32) dfMap[term]);
+                               }
+                               catch (System.NullReferenceException)
+                               {
+                                       throw new System.ArgumentException("df for term " + term.Text() + " not available");
+                               }
+                               return df;
+                       }
+                       
+                       public override int[] DocFreqs(Term[] terms)
+                       {
+                               int[] result = new int[terms.Length];
+                               for (int i = 0; i < terms.Length; i++)
+                               {
+                                       result[i] = DocFreq(terms[i]);
+                               }
+                               return result;
+                       }
+                       
+                       public override int MaxDoc()
+                       {
+                               return maxDoc;
+                       }
+                       
+                       public override Query Rewrite(Query query)
+                       {
+                               // this is a bit of a hack. We know that a query which
+                               // creates a Weight based on this Dummy-Searcher is
+                               // always already rewritten (see CreateWeight()).
+                               // Therefore we just return the unmodified query here.
+                       }
+                       
+                       public override void  Close()
+                       {
+                               throw new System.NotSupportedException();
+                       }
+
+            /// <summary>
+            /// .NET-specific counterpart of Close().
+            /// </summary>
+            public override void Dispose()
+            {
+                Close();
+            }
+                       
+                       public override Document Doc(int i)
+                       {
+                               throw new System.NotSupportedException();
+                       }
+                       
+                       public override Document Doc(int i, FieldSelector fieldSelector)
+                       {
+                               throw new System.NotSupportedException();
+                       }
+                       
+                       public override Explanation Explain(Weight weight, int doc)
+                       {
+                               throw new System.NotSupportedException();
+                       }
+                       
+                       public override void  Search(Weight weight, Filter filter, Collector results)
+                       {
+                               throw new System.NotSupportedException();
+                       }
+                       
+                       public override TopDocs Search(Weight weight, Filter filter, int n)
+                       {
+                               throw new System.NotSupportedException();
+                       }
+                       
+                       public override TopFieldDocs Search(Weight weight, Filter filter, int n, Sort sort)
+                       {
+                               throw new System.NotSupportedException();
+                       }
+               }
+               
+               private Searchable[] searchables;
+               private int[] starts;
+               private int maxDoc = 0;
+               
+               /// <summary>Creates a searcher which searches <i>searchers</i>. </summary>
+               public MultiSearcher(Searchable[] searchables)
+               {
+                       this.searchables = searchables;
+                       
+                       starts = new int[searchables.Length + 1]; // build starts array
+                       for (int i = 0; i < searchables.Length; i++)
+                       {
+                               starts[i] = maxDoc;
+                               maxDoc += searchables[i].MaxDoc(); // compute maxDocs
+                       }
+                       starts[searchables.Length] = maxDoc;
+               }
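+
+               // Construction sketch (illustrative; dir1 and dir2 are assumed to be
+               // already-opened Directory instances): a MultiSearcher exposes several
+               // sub-searchers under one contiguous doc-id space via the starts array.
+               //
+               //   Searcher s1 = new IndexSearcher(dir1, true);
+               //   Searcher s2 = new IndexSearcher(dir2, true);
+               //   Searcher multi = new MultiSearcher(new Searchable[] { s1, s2 });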
+               
+               /// <summary>Return the array of {@link Searchable}s this searches. </summary>
+               public virtual Searchable[] GetSearchables()
+               {
+                       return searchables;
+               }
+               
+               protected internal virtual int[] GetStarts()
+               {
+                       return starts;
+               }
+               
+               // inherit javadoc
+               public override void  Close()
+               {
+                       for (int i = 0; i < searchables.Length; i++)
+                               searchables[i].Close();
+               }
+
+        /// <summary>
+        /// .NET-specific counterpart of Close().
+        /// </summary>
+        public override void Dispose()
+        {
+            Close();
+        }
+
+               public override int DocFreq(Term term)
+               {
+                       int docFreq = 0;
+                       for (int i = 0; i < searchables.Length; i++)
+                               docFreq += searchables[i].DocFreq(term);
+                       return docFreq;
+               }
+               
+               // inherit javadoc
+               public override Document Doc(int n)
+               {
+                       int i = SubSearcher(n); // find searcher index
+                       return searchables[i].Doc(n - starts[i]); // dispatch to searcher
+               }
+               
+               // inherit javadoc
+               public override Document Doc(int n, FieldSelector fieldSelector)
+               {
+                       int i = SubSearcher(n); // find searcher index
+                       return searchables[i].Doc(n - starts[i], fieldSelector); // dispatch to searcher
+               }
+               
+               /// <summary>Returns index of the searcher for document <code>n</code> in the array
+               /// used to construct this searcher. 
+               /// </summary>
+               public virtual int SubSearcher(int n)
+               {
+                       // find searcher for doc n:
+                       return ReaderUtil.SubIndex(n, starts);
+               }
+               
+               /// <summary>Returns the document number of document <code>n</code> within its
+               /// sub-index. 
+               /// </summary>
+               public virtual int SubDoc(int n)
+               {
+                       return n - starts[SubSearcher(n)];
+               }
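+
+               // Worked example (illustrative): for three sub-searchers with maxDocs
+               // 100, 150 and 200, starts == [0, 100, 250, 450]. The merged doc id
+               // n = 160 falls in the second range, so SubSearcher(160) == 1 and
+               // SubDoc(160) == 60, its position within that sub-index.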
+               
+               public override int MaxDoc()
+               {
+                       return maxDoc;
+               }
+               
+               public override TopDocs Search(Weight weight, Filter filter, int nDocs)
+               {
+                       
+                       HitQueue hq = new HitQueue(nDocs, false);
+                       int totalHits = 0;
+                       
+                       for (int i = 0; i < searchables.Length; i++)
+                       {
+                               // search each searcher
+                               TopDocs docs = searchables[i].Search(weight, filter, nDocs);
+                               totalHits += docs.TotalHits; // update totalHits
+                               ScoreDoc[] scoreDocs = docs.ScoreDocs;
+                               for (int j = 0; j < scoreDocs.Length; j++)
+                               {
+                                       // merge scoreDocs into hq
+                                       ScoreDoc scoreDoc = scoreDocs[j];
+                                       scoreDoc.doc += starts[i]; // convert doc
+                                       if (!hq.Insert(scoreDoc))
+                                               break; // no more scores > minScore
+                               }
+                       }
+                       
+                       ScoreDoc[] scoreDocs2 = new ScoreDoc[hq.Size()];
+                       // put docs in array
+                       for (int i = hq.Size() - 1; i >= 0; i--)
+                               scoreDocs2[i] = (ScoreDoc) hq.Pop();
+                       
+                       float maxScore = (totalHits == 0)?System.Single.NegativeInfinity:scoreDocs2[0].score;
+                       
+                       return new TopDocs(totalHits, scoreDocs2, maxScore);
+               }
+               
+               public override TopFieldDocs Search(Weight weight, Filter filter, int n, Sort sort)
+               {
+                       FieldDocSortedHitQueue hq = null;
+                       int totalHits = 0;
+                       
+                       float maxScore = System.Single.NegativeInfinity;
+                       
+                       for (int i = 0; i < searchables.Length; i++)
+                       {
+                               // search each searcher
+                               TopFieldDocs docs = searchables[i].Search(weight, filter, n, sort);
+                               // If one of the Sort fields is FIELD_DOC, we need to fix its values so that
+                               // ties are broken by doc Id properly. Otherwise the sort would compare
+                               // 'relative' doc Ids that belong to two different searchers.
+                               for (int j = 0; j < docs.fields.Length; j++)
+                               {
+                                       if (docs.fields[j].GetType() == SortField.DOC)
+                                       {
+                                               // iterate over the score docs and change their fields value
+                                               for (int j2 = 0; j2 < docs.ScoreDocs.Length; j2++)
+                                               {
+                                                       FieldDoc fd = (FieldDoc) docs.ScoreDocs[j2];
+                                                       fd.fields[j] = (System.Int32) (((System.Int32) fd.fields[j]) + starts[i]);
+                                               }
+                                               break;
+                                       }
+                               }
+                               if (hq == null)
+                                       hq = new FieldDocSortedHitQueue(docs.fields, n);
+                               totalHits += docs.TotalHits; // update totalHits
+                               maxScore = System.Math.Max(maxScore, docs.GetMaxScore());
+                               ScoreDoc[] scoreDocs = docs.ScoreDocs;
+                               for (int j = 0; j < scoreDocs.Length; j++)
+                               {
+                                       // merge scoreDocs into hq
+                                       ScoreDoc scoreDoc = scoreDocs[j];
+                                       scoreDoc.doc += starts[i]; // convert doc
+                                       if (!hq.Insert(scoreDoc))
+                                               break; // no more scores > minScore
+                               }
+                       }
+                       
+                       ScoreDoc[] scoreDocs2 = new ScoreDoc[hq.Size()];
+                       // put docs in array
+                       for (int i = hq.Size() - 1; i >= 0; i--)
+                               scoreDocs2[i] = (ScoreDoc) hq.Pop();
+                       
+                       return new TopFieldDocs(totalHits, scoreDocs2, hq.GetFields(), maxScore);
+               }
+               
+               // inherit javadoc
+               public override void  Search(Weight weight, Filter filter, Collector collector)
+               {
+                       for (int i = 0; i < searchables.Length; i++)
+                       {
+                               
+                               int start = starts[i];
+                               
+                               Collector hc = new AnonymousClassCollector(collector, start, this);
+                               
+                               searchables[i].Search(weight, filter, hc);
+                       }
+               }
+               
+               public override Query Rewrite(Query original)
+               {
+                       Query[] queries = new Query[searchables.Length];
+                       for (int i = 0; i < searchables.Length; i++)
+                       {
+                               queries[i] = searchables[i].Rewrite(original);
+                       }
+                       return queries[0].Combine(queries);
+               }
+               
+               public override Explanation Explain(Weight weight, int doc)
+               {
+                       int i = SubSearcher(doc); // find searcher index
+                       return searchables[i].Explain(weight, doc - starts[i]); // dispatch to searcher
+               }
+               
+               /// <summary> Create weight in a multiple-index scenario.
+               /// 
+               /// Distributed query processing is done in the following steps:
+               /// 1. rewrite the query
+               /// 2. extract the necessary terms
+               /// 3. collect dfs for these terms from the Searchables
+               /// 4. create the query weight using the aggregate dfs
+               /// 5. distribute that weight to the Searchables
+               /// 6. merge the results
+               /// 
+               /// Steps 1-4 are done here, steps 5 and 6 in the search() methods.
+               /// </summary>
+               /// <returns> the Weight built from the rewritten query and the aggregate dfs
+               /// </returns>
+               public /*protected internal*/ override Weight CreateWeight(Query original)
+               {
+                       // step 1
+                       Query rewrittenQuery = Rewrite(original);
+                       
+                       // step 2
+                       System.Collections.Hashtable terms = new System.Collections.Hashtable();
+                       rewrittenQuery.ExtractTerms(terms);
+                       
+                       // step 3
+                       Term[] allTermsArray = new Term[terms.Count];
+            int index = 0;
+            System.Collections.IEnumerator e = terms.Keys.GetEnumerator();
+            while (e.MoveNext())
+                allTermsArray[index++] = e.Current as Term;
+            int[] aggregatedDfs = new int[terms.Count];
+                       for (int i = 0; i < searchables.Length; i++)
+                       {
+                               int[] dfs = searchables[i].DocFreqs(allTermsArray);
+                               for (int j = 0; j < aggregatedDfs.Length; j++)
+                               {
+                                       aggregatedDfs[j] += dfs[j];
+                               }
+                       }
+                       
+                       System.Collections.Hashtable dfMap = new System.Collections.Hashtable();
+                       for (int i = 0; i < allTermsArray.Length; i++)
+                       {
+                               dfMap[allTermsArray[i]] = (System.Int32) aggregatedDfs[i];
+                       }
+                       
+                       // step 4
+                       int numDocs = MaxDoc();
+                       CachedDfSource cacheSim = new CachedDfSource(dfMap, numDocs, GetSimilarity());
+                       
+                       return rewrittenQuery.Weight(cacheSim);
+               }
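+
+               // Aggregation example for step 3 (illustrative, hypothetical term): if
+               // content:lucene has docFreq 3 in the first sub-index and 5 in the
+               // second, dfMap caches the aggregate df of 8, so every sub-searcher
+               // scores against the same global idf rather than its local one.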
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/MultiTermQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/MultiTermQuery.cs
new file mode 100644 (file)
index 0000000..b38bf70
--- /dev/null
@@ -0,0 +1,532 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using System.Runtime.InteropServices;
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using QueryParser = Mono.Lucene.Net.QueryParsers.QueryParser;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> An abstract {@link Query} that matches documents
+       /// containing a subset of terms provided by a {@link
+       /// FilteredTermEnum} enumeration.
+       /// 
+       /// <p/>This query cannot be used directly; you must subclass
+       /// it and define {@link #getEnum} to provide a {@link
+       /// FilteredTermEnum} that iterates through the terms to be
+       /// matched.
+       /// 
+       /// <p/><b>NOTE</b>: if {@link #setRewriteMethod} is either
+       /// {@link #CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE} or {@link
+       /// #SCORING_BOOLEAN_QUERY_REWRITE}, you may encounter a
+       /// {@link BooleanQuery.TooManyClauses} exception during
+       /// searching, which happens when the number of terms to be
+       /// searched exceeds {@link
+       /// BooleanQuery#GetMaxClauseCount()}.  Setting {@link
+       /// #setRewriteMethod} to {@link #CONSTANT_SCORE_FILTER_REWRITE}
+       /// prevents this.
+       /// 
+       /// <p/>The recommended rewrite method is {@link
+       /// #CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}: it doesn't spend CPU
+       /// computing unhelpful scores, and it tries to pick the most
+       /// performant rewrite method given the query.
+       /// 
+       /// Note that {@link QueryParser} produces
+       /// MultiTermQueries using {@link
+       /// #CONSTANT_SCORE_AUTO_REWRITE_DEFAULT} by default.
+       /// </summary>
+       [Serializable]
+       public abstract class MultiTermQuery:Query
+       {
+               [Serializable]
+               public class AnonymousClassConstantScoreAutoRewrite:ConstantScoreAutoRewrite
+               {
+                       public override void  SetTermCountCutoff(int count)
+                       {
+                               throw new System.NotSupportedException("Please create a private instance");
+                       }
+                       
+                       public override void  SetDocCountPercent(double percent)
+                       {
+                               throw new System.NotSupportedException("Please create a private instance");
+                       }
+                       
+                       // Make sure we are still a singleton even after deserializing
+                       protected internal virtual System.Object ReadResolve()
+                       {
+                               return Mono.Lucene.Net.Search.MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
+                       }
+               }
+               /* @deprecated move to sub class */
+               protected internal Term term;
+               protected internal RewriteMethod rewriteMethod = CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
+               [NonSerialized]
+               internal int numberOfTerms = 0;
+               
+               /// <summary>Abstract class that defines how the query is rewritten. </summary>
+               [Serializable]
+               public abstract class RewriteMethod
+               {
+                       public abstract Query Rewrite(IndexReader reader, MultiTermQuery query);
+               }
+               
+               [Serializable]
+               private sealed class ConstantScoreFilterRewrite:RewriteMethod
+               {
+                       public override Query Rewrite(IndexReader reader, MultiTermQuery query)
+                       {
+                               Query result = new ConstantScoreQuery(new MultiTermQueryWrapperFilter(query));
+                               result.SetBoost(query.GetBoost());
+                               return result;
+                       }
+                       
+                       // Make sure we are still a singleton even after deserializing
+                       internal System.Object ReadResolve()
+                       {
+                               return Mono.Lucene.Net.Search.MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE;
+                       }
+               }
+               
+               /// <summary>A rewrite method that first creates a private Filter,
+               /// by visiting each term in sequence and marking all docs
+               /// for that term.  Matching documents are assigned a
+               /// constant score equal to the query's boost.
+               /// 
+               /// <p/> This method is faster than the BooleanQuery
+               /// rewrite methods when the number of matched terms or
+               /// matched documents is non-trivial. Also, it will never
+               /// hit an errant {@link BooleanQuery.TooManyClauses}
+               /// exception.
+               /// 
+               /// </summary>
+               /// <seealso cref="setRewriteMethod">
+               /// </seealso>
+               public static readonly RewriteMethod CONSTANT_SCORE_FILTER_REWRITE = new ConstantScoreFilterRewrite();
+               
+               [Serializable]
+               private class ScoringBooleanQueryRewrite:RewriteMethod
+               {
+                       public override Query Rewrite(IndexReader reader, MultiTermQuery query)
+                       {
+                               
+                               FilteredTermEnum enumerator = query.GetEnum(reader);
+                               BooleanQuery result = new BooleanQuery(true);
+                               int count = 0;
+                               try
+                               {
+                                       do 
+                                       {
+                                               Term t = enumerator.Term();
+                                               if (t != null)
+                                               {
+                                                       TermQuery tq = new TermQuery(t); // found a match
+                                                       tq.SetBoost(query.GetBoost() * enumerator.Difference()); // set the boost
+                                                       result.Add(tq, BooleanClause.Occur.SHOULD); // add to query
+                                                       count++;
+                                               }
+                                       }
+                                       while (enumerator.Next());
+                               }
+                               finally
+                               {
+                                       enumerator.Close();
+                               }
+                               query.IncTotalNumberOfTerms(count);
+                               return result;
+                       }
+                       
+                       // Make sure we are still a singleton even after deserializing
+                       protected internal virtual System.Object ReadResolve()
+                       {
+                               return Mono.Lucene.Net.Search.MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE;
+                       }
+               }
+               
+               /// <summary>A rewrite method that first translates each term into
+               /// {@link BooleanClause.Occur#SHOULD} clause in a
+               /// BooleanQuery, and keeps the scores as computed by the
+               /// query.  Note that typically such scores are
+               /// meaningless to the user, and require non-trivial CPU
+               /// to compute, so it's almost always better to use {@link
+               /// #CONSTANT_SCORE_AUTO_REWRITE_DEFAULT} instead.
+               /// 
+               /// <p/><b>NOTE</b>: This rewrite method will hit {@link
+               /// BooleanQuery.TooManyClauses} if the number of terms
+               /// exceeds {@link BooleanQuery#getMaxClauseCount}.
+               /// 
+               /// </summary>
+               /// <seealso cref="setRewriteMethod">
+               /// </seealso>
+               public static readonly RewriteMethod SCORING_BOOLEAN_QUERY_REWRITE = new ScoringBooleanQueryRewrite();
+               
+               [Serializable]
+               private class ConstantScoreBooleanQueryRewrite:ScoringBooleanQueryRewrite
+               {
+                       public override Query Rewrite(IndexReader reader, MultiTermQuery query)
+                       {
+                               // strip the scores off
+                               Query result = new ConstantScoreQuery(new QueryWrapperFilter(base.Rewrite(reader, query)));
+                               result.SetBoost(query.GetBoost());
+                               return result;
+                       }
+                       
+                       // Make sure we are still a singleton even after deserializing
+                       protected internal override System.Object ReadResolve()
+                       {
+                               return Mono.Lucene.Net.Search.MultiTermQuery.CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE;
+                       }
+               }
+               
+               /// <summary>Like {@link #SCORING_BOOLEAN_QUERY_REWRITE} except
+               /// scores are not computed.  Instead, each matching
+               /// document receives a constant score equal to the
+               /// query's boost.
+               /// 
+               /// <p/><b>NOTE</b>: This rewrite method will hit {@link
+               /// BooleanQuery.TooManyClauses} if the number of terms
+               /// exceeds {@link BooleanQuery#getMaxClauseCount}.
+               /// 
+               /// </summary>
+               /// <seealso cref="setRewriteMethod">
+               /// </seealso>
+               public static readonly RewriteMethod CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE = new ConstantScoreBooleanQueryRewrite();
+               
+               
+               /// <summary>A rewrite method that tries to pick the best
+               /// constant-score rewrite method based on term and
+               /// document counts from the query.  If both the number of
+               /// terms and documents is small enough, then {@link
+               /// #CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE} is used.
+               /// Otherwise, {@link #CONSTANT_SCORE_FILTER_REWRITE} is
+               /// used.
+               /// </summary>
+               [Serializable]
+               public class ConstantScoreAutoRewrite:RewriteMethod
+               {
+                       public ConstantScoreAutoRewrite()
+                       {
+                               InitBlock();
+                       }
+                       private void  InitBlock()
+                       {
+                               termCountCutoff = DEFAULT_TERM_COUNT_CUTOFF;
+                               docCountPercent = DEFAULT_DOC_COUNT_PERCENT;
+                       }
+                       
+                       // Defaults derived from rough tests with a 20.0 million
+                       // doc Wikipedia index.  With more than 350 terms in the
+                       // query, the filter method is fastest:
+                       public static int DEFAULT_TERM_COUNT_CUTOFF = 350;
+                       
+                       // If the query will hit more than 1 in 1000 of the docs
+                       // in the index (0.1%), the filter method is fastest:
+                       public static double DEFAULT_DOC_COUNT_PERCENT = 0.1;
+                       
+                       private int termCountCutoff;
+                       private double docCountPercent;
+                       
+                       /// <summary>If the number of terms in this query is equal to or
+                       /// larger than this setting then {@link
+                       /// #CONSTANT_SCORE_FILTER_REWRITE} is used. 
+                       /// </summary>
+                       public virtual void  SetTermCountCutoff(int count)
+                       {
+                               termCountCutoff = count;
+                       }
+                       
+                       /// <seealso cref="setTermCountCutoff">
+                       /// </seealso>
+                       public virtual int GetTermCountCutoff()
+                       {
+                               return termCountCutoff;
+                       }
+                       
+                       /// <summary>If the number of documents to be visited in the
+                       /// postings exceeds this specified percentage of the
+                       /// maxDoc() for the index, then {@link
+                       /// #CONSTANT_SCORE_FILTER_REWRITE} is used.
+                       /// </summary>
+                       /// <param name="percent">0.0 to 100.0 
+                       /// </param>
+                       public virtual void  SetDocCountPercent(double percent)
+                       {
+                               docCountPercent = percent;
+                       }
+                       
+                       /// <seealso cref="setDocCountPercent">
+                       /// </seealso>
+                       public virtual double GetDocCountPercent()
+                       {
+                               return docCountPercent;
+                       }
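+
+                       // Tuning sketch (illustrative): the shared
+                       // CONSTANT_SCORE_AUTO_REWRITE_DEFAULT instance rejects changes, so
+                       // callers wanting different cutoffs create a private instance:
+                       //
+                       //   MultiTermQuery.ConstantScoreAutoRewrite rewrite = new MultiTermQuery.ConstantScoreAutoRewrite();
+                       //   rewrite.SetTermCountCutoff(500);   // switch to the filter past 500 terms
+                       //   rewrite.SetDocCountPercent(0.5);   // ...or past 0.5% of maxDoc
+                       //   query.SetRewriteMethod(rewrite);   // query: some MultiTermQuery subclass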
+                       
+                       public override Query Rewrite(IndexReader reader, MultiTermQuery query)
+                       {
+                               // Get the enum and start visiting terms.  If we
+                               // exhaust the enum before hitting either of the
+                               // cutoffs, we use ConstantScoreBooleanQueryRewrite;
+                               // else, ConstantScoreFilterRewrite:
+                               System.Collections.ArrayList pendingTerms = new System.Collections.ArrayList();
+                               int docCountCutoff = (int) ((docCountPercent / 100.0) * reader.MaxDoc());
+                               int termCountLimit = System.Math.Min(BooleanQuery.GetMaxClauseCount(), termCountCutoff);
+                               int docVisitCount = 0;
+                               
+                               FilteredTermEnum enumerator = query.GetEnum(reader);
+                               try
+                               {
+                                       while (true)
+                                       {
+                                               Term t = enumerator.Term();
+                                               if (t != null)
+                                               {
+                                                       pendingTerms.Add(t);
+                                                       // Loading the TermInfo from the terms dict here
+                                                       // should not be costly, because 1) the
+                                                       // query/filter will load the TermInfo when it
+                                                       // runs, and 2) the terms dict has a cache:
+                                                       docVisitCount += reader.DocFreq(t);
+                                               }
+                                               
+                                               if (pendingTerms.Count >= termCountLimit || docVisitCount >= docCountCutoff)
+                                               {
+                                                       // Too many terms -- make a filter.
+                                                       Query result = new ConstantScoreQuery(new MultiTermQueryWrapperFilter(query));
+                                                       result.SetBoost(query.GetBoost());
+                                                       return result;
+                                               }
+                                               else if (!enumerator.Next())
+                                               {
+                                                       // Enumeration is done, and we hit a small
+                                                       // enough number of terms & docs -- just make a
+                                                       // BooleanQuery, now
+                                                       System.Collections.IEnumerator it = pendingTerms.GetEnumerator();
+                                                       BooleanQuery bq = new BooleanQuery(true);
+                                                       while (it.MoveNext())
+                                                       {
+                                                               TermQuery tq = new TermQuery((Term) it.Current);
+                                                               bq.Add(tq, BooleanClause.Occur.SHOULD);
+                                                       }
+                                                       // Strip scores
+                                                       Query result = new ConstantScoreQuery(new QueryWrapperFilter(bq));
+                                                       result.SetBoost(query.GetBoost());
+                                                       query.IncTotalNumberOfTerms(pendingTerms.Count);
+                                                       return result;
+                                               }
+                                       }
+                               }
+                               finally
+                               {
+                                       enumerator.Close();
+                               }
+                       }
+                       
+                       public override int GetHashCode()
+                       {
+                               int prime = 1279;
+                               return (int) (prime * termCountCutoff + BitConverter.DoubleToInt64Bits(docCountPercent));
+                       }
+                       
+                       public  override bool Equals(System.Object obj)
+                       {
+                               if (this == obj)
+                                       return true;
+                               if (obj == null)
+                                       return false;
+                               if (GetType() != obj.GetType())
+                                       return false;
+                               
+                               ConstantScoreAutoRewrite other = (ConstantScoreAutoRewrite) obj;
+                               if (other.termCountCutoff != termCountCutoff)
+                               {
+                                       return false;
+                               }
+                               
+                               if (BitConverter.DoubleToInt64Bits(other.docCountPercent) != BitConverter.DoubleToInt64Bits(docCountPercent))
+                               {
+                                       return false;
+                               }
+                               
+                               return true;
+                       }
+               }
+               
+               /// <summary>Read-only default instance of {@link
+               /// ConstantScoreAutoRewrite}, with {@link
+               /// ConstantScoreAutoRewrite#setTermCountCutoff} set to
+               /// {@link
+               /// ConstantScoreAutoRewrite#DEFAULT_TERM_COUNT_CUTOFF}
+               /// and {@link
+               /// ConstantScoreAutoRewrite#setDocCountPercent} set to
+               /// {@link
+               /// ConstantScoreAutoRewrite#DEFAULT_DOC_COUNT_PERCENT}.
+               /// Note that you cannot alter the configuration of this
+               /// instance; you'll need to create a private instance
+               /// instead. 
+               /// </summary>
+               public static readonly RewriteMethod CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
+               
+               /// <summary> Constructs a query for terms matching <code>term</code>.</summary>
+               /// <deprecated> check sub class for possible term access - the Term does not
+               /// make sense for all MultiTermQuerys and will be removed.
+               /// </deprecated>
+        [Obsolete("check sub class for possible term access - the Term does not make sense for all MultiTermQuerys and will be removed.")]
+               public MultiTermQuery(Term term)
+               {
+                       this.term = term;
+               }
+               
+               /// <summary> Constructs a query matching terms that cannot be represented with a single
+               /// Term.
+               /// </summary>
+               public MultiTermQuery()
+               {
+               }
+               
+               /// <summary> Returns the pattern term.</summary>
+               /// <deprecated> check sub class for possible term access - getTerm does not
+               /// make sense for all MultiTermQuerys and will be removed.
+               /// </deprecated>
+        [Obsolete("check sub class for possible term access - getTerm does not make sense for all MultiTermQuerys and will be removed.")]
+               public virtual Term GetTerm()
+               {
+                       return term;
+               }
+               
+               /// <summary>Construct the enumeration to be used, expanding the pattern term. </summary>
+               public /*protected internal*/ abstract FilteredTermEnum GetEnum(IndexReader reader);
+               
+		/// <summary> Expert: Returns the number of unique terms visited during execution of the query.
+		/// If there are many of them, you may consider using another query type
+		/// or reducing the total term count of your index.
+		/// <p/>This method is not thread safe; be sure to only call it when no query is running!
+		/// If you re-use the same query instance for another
+		/// search, be sure to first reset the term counter
+		/// with {@link #clearTotalNumberOfTerms}.
+		/// <p/>On optimized indexes (no MultiReaders) you get the correct number of
+		/// unique terms for the whole index. Use this number to compare different queries.
+		/// For non-optimized indexes this number can also be achieved in
+		/// non-constant-score mode. In constant-score mode you get the total number of
+		/// terms sought across all segments / sub-readers.
+               /// </summary>
+               /// <seealso cref="clearTotalNumberOfTerms">
+               /// </seealso>
+               public virtual int GetTotalNumberOfTerms()
+               {
+                       return numberOfTerms;
+               }
+               
+               /// <summary> Expert: Resets the counting of unique terms.
+               /// Do this before executing the query/filter.
+               /// </summary>
+               /// <seealso cref="getTotalNumberOfTerms">
+               /// </seealso>
+               public virtual void  ClearTotalNumberOfTerms()
+               {
+                       numberOfTerms = 0;
+               }
+               
+               protected internal virtual void  IncTotalNumberOfTerms(int inc)
+               {
+                       numberOfTerms += inc;
+               }
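+		
+		// A minimal usage sketch for the term counters above, assuming an
+		// IndexSearcher `searcher` and a MultiTermQuery `q` built elsewhere:
+		//
+		//     q.ClearTotalNumberOfTerms();
+		//     searcher.Search(q, 10);
+		//     int visited = q.GetTotalNumberOfTerms();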
+               
+               public override Query Rewrite(IndexReader reader)
+               {
+                       return rewriteMethod.Rewrite(reader, this);
+               }
+               
+               
+               /* Prints a user-readable version of this query.
+		* Implemented for backward compatibility in case MultiTermQuery
+		* subclasses do not implement it.
+               */
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       if (term != null)
+                       {
+                               if (!term.Field().Equals(field))
+                               {
+                                       buffer.Append(term.Field());
+                                       buffer.Append(":");
+                               }
+                               buffer.Append(term.Text());
+                       }
+                       else
+                       {
+                               buffer.Append("termPattern:unknown");
+                       }
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       return buffer.ToString();
+               }
+               
+               /// <seealso cref="setRewriteMethod">
+               /// </seealso>
+               public virtual RewriteMethod GetRewriteMethod()
+               {
+                       return rewriteMethod;
+               }
+               
+               /// <summary> Sets the rewrite method to be used when executing the
+               /// query.  You can use one of the four core methods, or
+               /// implement your own subclass of {@link RewriteMethod}. 
+               /// </summary>
+               public virtual void  SetRewriteMethod(RewriteMethod method)
+               {
+                       rewriteMethod = method;
+               }
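+		
+		// One possible sketch, assuming the WildcardQuery subclass from this
+		// library: a pattern that expands to many terms can be switched to the
+		// filter rewrite to bypass the BooleanQuery max clause count:
+		//
+		//     MultiTermQuery wq = new WildcardQuery(new Term("body", "do*"));
+		//     wq.SetRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);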
+               
+               //@Override
+               public override int GetHashCode()
+               {
+                       int prime = 31;
+                       int result = 1;
+                       result = prime * result + System.Convert.ToInt32(GetBoost());
+                       result = prime * result;
+                       result += rewriteMethod.GetHashCode();
+                       return result;
+               }
+               
+               //@Override
+               public  override bool Equals(System.Object obj)
+               {
+                       if (this == obj)
+                               return true;
+                       if (obj == null)
+                               return false;
+                       if (GetType() != obj.GetType())
+                               return false;
+                       MultiTermQuery other = (MultiTermQuery) obj;
+                       if (System.Convert.ToInt32(GetBoost()) != System.Convert.ToInt32(other.GetBoost()))
+                               return false;
+                       if (!rewriteMethod.Equals(other.rewriteMethod))
+                       {
+                               return false;
+                       }
+                       return true;
+               }
+               static MultiTermQuery()
+               {
+                       CONSTANT_SCORE_AUTO_REWRITE_DEFAULT = new AnonymousClassConstantScoreAutoRewrite();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/MultiTermQueryWrapperFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/MultiTermQueryWrapperFilter.cs
new file mode 100644 (file)
index 0000000..ed9a98b
--- /dev/null
@@ -0,0 +1,250 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using TermDocs = Mono.Lucene.Net.Index.TermDocs;
+using TermEnum = Mono.Lucene.Net.Index.TermEnum;
+using OpenBitSet = Mono.Lucene.Net.Util.OpenBitSet;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A wrapper for {@link MultiTermQuery}, that exposes its
+       /// functionality as a {@link Filter}.
+       /// <p/>
+       /// <code>MultiTermQueryWrapperFilter</code> is not designed to
+       /// be used by itself. Normally you subclass it to provide a Filter
+       /// counterpart for a {@link MultiTermQuery} subclass.
+       /// <p/>
+       /// For example, {@link TermRangeFilter} and {@link PrefixFilter} extend
+       /// <code>MultiTermQueryWrapperFilter</code>.
+       /// This class also provides the functionality behind
+       /// {@link MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE};
+       /// this is why it is not abstract.
+       /// </summary>
+       [Serializable]
+       public class MultiTermQueryWrapperFilter:Filter
+       {
+               private class AnonymousClassTermGenerator:TermGenerator
+               {
+                       public AnonymousClassTermGenerator(System.Collections.BitArray bitSet, MultiTermQueryWrapperFilter enclosingInstance)
+                       {
+                               InitBlock(bitSet, enclosingInstance);
+                       }
+                       private void  InitBlock(System.Collections.BitArray bitSet, MultiTermQueryWrapperFilter enclosingInstance)
+                       {
+                               this.bitSet = bitSet;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private System.Collections.BitArray bitSet;
+                       private MultiTermQueryWrapperFilter enclosingInstance;
+                       public MultiTermQueryWrapperFilter Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       public override void  HandleDoc(int doc)
+                       {
+                               bitSet.Set(doc, true);
+                       }
+               }
+
+               private class AnonymousClassTermGenerator1:TermGenerator
+               {
+                       public AnonymousClassTermGenerator1(Mono.Lucene.Net.Util.OpenBitSet bitSet, MultiTermQueryWrapperFilter enclosingInstance)
+                       {
+                               InitBlock(bitSet, enclosingInstance);
+                       }
+                       private void  InitBlock(Mono.Lucene.Net.Util.OpenBitSet bitSet, MultiTermQueryWrapperFilter enclosingInstance)
+                       {
+                               this.bitSet = bitSet;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private Mono.Lucene.Net.Util.OpenBitSet bitSet;
+                       private MultiTermQueryWrapperFilter enclosingInstance;
+                       public MultiTermQueryWrapperFilter Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       public override void  HandleDoc(int doc)
+                       {
+                               bitSet.Set(doc);
+                       }
+               }
+               
+               protected internal MultiTermQuery query;
+               
+               /// <summary> Wrap a {@link MultiTermQuery} as a Filter.</summary>
+               protected internal MultiTermQueryWrapperFilter(MultiTermQuery query)
+               {
+                       this.query = query;
+               }
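+		
+		// A minimal subclassing sketch in the spirit of the PrefixFilter mentioned
+		// in the class summary (MyPrefixFilter is a hypothetical name, not part of
+		// this library):
+		//
+		//     public class MyPrefixFilter : MultiTermQueryWrapperFilter
+		//     {
+		//         public MyPrefixFilter(PrefixQuery query) : base(query) { }
+		//     }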
+               
+               //@Override
+               public override System.String ToString()
+               {
+                       // query.toString should be ok for the filter, too, if the query boost is 1.0f
+                       return query.ToString();
+               }
+               
+               //@Override
+               public  override bool Equals(System.Object o)
+               {
+                       if (o == this)
+                               return true;
+                       if (o == null)
+                               return false;
+                       if (this.GetType().Equals(o.GetType()))
+                       {
+                               return this.query.Equals(((MultiTermQueryWrapperFilter) o).query);
+                       }
+                       return false;
+               }
+               
+               //@Override
+               public override int GetHashCode()
+               {
+                       return query.GetHashCode();
+               }
+               
+		/// <summary> Expert: Returns the number of unique terms visited during execution of the filter.
+		/// If there are many of them, you may consider using another filter type
+		/// or reducing the total term count of your index.
+		/// <p/>This method is not thread safe; be sure to only call it when no filter is running!
+               /// If you re-use the same filter instance for another
+               /// search, be sure to first reset the term counter
+               /// with {@link #clearTotalNumberOfTerms}.
+               /// </summary>
+               /// <seealso cref="clearTotalNumberOfTerms">
+               /// </seealso>
+               public virtual int GetTotalNumberOfTerms()
+               {
+                       return query.GetTotalNumberOfTerms();
+               }
+               
+               /// <summary> Expert: Resets the counting of unique terms.
+               /// Do this before executing the filter.
+               /// </summary>
+               /// <seealso cref="getTotalNumberOfTerms">
+               /// </seealso>
+               public virtual void  ClearTotalNumberOfTerms()
+               {
+                       query.ClearTotalNumberOfTerms();
+               }
+               
+               internal abstract class TermGenerator
+               {
+            public virtual void Generate(MultiTermQuery query, IndexReader reader, TermEnum enumerator)
+                       {
+                               int[] docs = new int[32];
+                               int[] freqs = new int[32];
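+				// docs/freqs are 32-entry scratch buffers; TermDocs.Read fills them
+				// in batches and returns how many entries were read (0 = exhausted).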
+                               TermDocs termDocs = reader.TermDocs();
+                               try
+                               {
+                                       int termCount = 0;
+                                       do 
+                                       {
+                                               Term term = enumerator.Term();
+                                               if (term == null)
+                                                       break;
+                                               termCount++;
+                                               termDocs.Seek(term);
+                                               while (true)
+                                               {
+                                                       int count = termDocs.Read(docs, freqs);
+                                                       if (count != 0)
+                                                       {
+                                                               for (int i = 0; i < count; i++)
+                                                               {
+                                                                       HandleDoc(docs[i]);
+                                                               }
+                                                       }
+                                                       else
+                                                       {
+                                                               break;
+                                                       }
+                                               }
+                                       }
+                                       while (enumerator.Next());
+                                       
+					query.IncTotalNumberOfTerms(termCount); // {{Aroush-2.9}} is the use of 'termCount' here correct?
+                               }
+                               finally
+                               {
+                                       termDocs.Close();
+                               }
+                       }
+			public abstract void HandleDoc(int doc);
+               }
+               
+               /// <summary> Returns a BitSet with true for documents which should be
+               /// permitted in search results, and false for those that should
+               /// not.
+               /// </summary>
+               /// <deprecated> Use {@link #GetDocIdSet(IndexReader)} instead.
+               /// </deprecated>
+               //@Override
+        [Obsolete("Use GetDocIdSet(IndexReader) instead.")]
+               public override System.Collections.BitArray Bits(IndexReader reader)
+               {
+                       TermEnum enumerator = query.GetEnum(reader);
+                       try
+                       {
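+				// The BitArray is sized up to the next multiple of 64 bits,
+				// keeping its backing storage word-aligned.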
+                               System.Collections.BitArray bitSet = new System.Collections.BitArray((reader.MaxDoc() % 64 == 0?reader.MaxDoc() / 64:reader.MaxDoc() / 64 + 1) * 64);
+                               new AnonymousClassTermGenerator(bitSet, this).Generate(query, reader, enumerator);
+                               return bitSet;
+                       }
+                       finally
+                       {
+                               enumerator.Close();
+                       }
+               }
+               
+               /// <summary> Returns a DocIdSet with documents that should be
+               /// permitted in search results.
+               /// </summary>
+               //@Override
+               public override DocIdSet GetDocIdSet(IndexReader reader)
+               {
+                       TermEnum enumerator = query.GetEnum(reader);
+                       try
+                       {
+                               // if current term in enum is null, the enum is empty -> shortcut
+                               if (enumerator.Term() == null)
+                                       return DocIdSet.EMPTY_DOCIDSET;
+				// else fill into an OpenBitSet
+                               OpenBitSet bitSet = new OpenBitSet(reader.MaxDoc());
+                               new AnonymousClassTermGenerator1(bitSet, this).Generate(query, reader, enumerator);
+                               return bitSet;
+                       }
+                       finally
+                       {
+                               enumerator.Close();
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/NumericRangeFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/NumericRangeFilter.cs
new file mode 100644 (file)
index 0000000..e435606
--- /dev/null
@@ -0,0 +1,180 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NumericTokenStream = Mono.Lucene.Net.Analysis.NumericTokenStream;
+using NumericField = Mono.Lucene.Net.Documents.NumericField;
+using NumericUtils = Mono.Lucene.Net.Util.NumericUtils;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A {@link Filter} that only accepts numeric values within
+       /// a specified range. To use this, you must first index the
+       /// numeric values using {@link NumericField} (expert: {@link
+       /// NumericTokenStream}).
+       /// 
+       /// <p/>You create a new NumericRangeFilter with the static
+	/// factory methods, e.g.:
+       /// 
+       /// <pre>
+       /// Filter f = NumericRangeFilter.newFloatRange("weight",
+	/// new Float(0.03f), new Float(0.10f),
+	/// true, true);
+	/// </pre>
+	/// 
+	/// accepts all documents whose float valued "weight" field
+	/// ranges from 0.03 to 0.10, inclusive.
+       /// See {@link NumericRangeQuery} for details on how Lucene
+       /// indexes and searches numeric valued fields.
+       /// 
+       /// <p/><font color="red"><b>NOTE:</b> This API is experimental and
+       /// might change in incompatible ways in the next
+       /// release.</font>
+       /// 
+       /// </summary>
+       /// <since> 2.9
+       /// 
+       /// </since>
+       [Serializable]
+       public sealed class NumericRangeFilter:MultiTermQueryWrapperFilter
+       {
+               
+               private NumericRangeFilter(NumericRangeQuery query):base(query)
+               {
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeFilter</code> that filters a <code>long</code>
+		/// range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the filter
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeFilter NewLongRange(System.String field, int precisionStep, System.ValueType min, System.ValueType max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeFilter(NumericRangeQuery.NewLongRange(field, precisionStep, min, max, minInclusive, maxInclusive));
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeFilter</code> that filters a <code>long</code>
+		/// range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4).
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the filter
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeFilter NewLongRange(System.String field, System.ValueType min, System.ValueType max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeFilter(NumericRangeQuery.NewLongRange(field, min, max, minInclusive, maxInclusive));
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeFilter</code> that filters an <code>int</code>
+		/// range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the filter
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeFilter NewIntRange(System.String field, int precisionStep, System.ValueType min, System.ValueType max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeFilter(NumericRangeQuery.NewIntRange(field, precisionStep, min, max, minInclusive, maxInclusive));
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeFilter</code> that filters an <code>int</code>
+		/// range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4).
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the filter
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeFilter NewIntRange(System.String field, System.ValueType min, System.ValueType max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeFilter(NumericRangeQuery.NewIntRange(field, min, max, minInclusive, maxInclusive));
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeFilter</code> that filters a <code>double</code>
+		/// range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the filter
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeFilter NewDoubleRange(System.String field, int precisionStep, System.Double min, System.Double max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeFilter(NumericRangeQuery.NewDoubleRange(field, precisionStep, min, max, minInclusive, maxInclusive));
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeFilter</code> that filters a <code>double</code>
+		/// range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4).
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the filter
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeFilter NewDoubleRange(System.String field, System.Double min, System.Double max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeFilter(NumericRangeQuery.NewDoubleRange(field, min, max, minInclusive, maxInclusive));
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeFilter</code> that filters a <code>float</code>
+		/// range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>.
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the filter
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeFilter NewFloatRange(System.String field, int precisionStep, System.Single min, System.Single max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeFilter(NumericRangeQuery.NewFloatRange(field, precisionStep, min, max, minInclusive, maxInclusive));
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeFilter</code> that filters a <code>float</code>
+		/// range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4).
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the filter
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeFilter NewFloatRange(System.String field, System.Single min, System.Single max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeFilter(NumericRangeQuery.NewFloatRange(field, min, max, minInclusive, maxInclusive));
+               }
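+		
+		// A minimal usage sketch, assuming an int field "count" indexed with
+		// NumericField; a half-open range (no lower bound) whose upper bound is
+		// inclusive, i.e. it matches count <= 100:
+		//
+		//     Filter f = NumericRangeFilter.NewIntRange("count", null, 100, false, true);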
+               
+               /// <summary>Returns the field name for this filter </summary>
+               public System.String GetField()
+               {
+                       return ((NumericRangeQuery) query).GetField();
+               }
+               
+               /// <summary>Returns <code>true</code> if the lower endpoint is inclusive </summary>
+               public bool IncludesMin()
+               {
+                       return ((NumericRangeQuery) query).IncludesMin();
+               }
+               
+               /// <summary>Returns <code>true</code> if the upper endpoint is inclusive </summary>
+               public bool IncludesMax()
+               {
+                       return ((NumericRangeQuery) query).IncludesMax();
+               }
+               
+               /// <summary>Returns the lower value of this range filter </summary>
+               public System.ValueType GetMin()
+               {
+                       return ((NumericRangeQuery) query).GetMin();
+               }
+               
+               /// <summary>Returns the upper value of this range filter </summary>
+               public System.ValueType GetMax()
+               {
+                       return ((NumericRangeQuery) query).GetMax();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/NumericRangeQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/NumericRangeQuery.cs
new file mode 100644 (file)
index 0000000..c110a34
--- /dev/null
@@ -0,0 +1,640 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NumericTokenStream = Mono.Lucene.Net.Analysis.NumericTokenStream;
+using NumericField = Mono.Lucene.Net.Documents.NumericField;
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using NumericUtils = Mono.Lucene.Net.Util.NumericUtils;
+using StringHelper = Mono.Lucene.Net.Util.StringHelper;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> <p/>A {@link Query} that matches numeric values within a
+       /// specified range.  To use this, you must first index the
+       /// numeric values using {@link NumericField} (expert: {@link
+       /// NumericTokenStream}).  If your terms are instead textual,
+       /// you should use {@link TermRangeQuery}.  {@link
+       /// NumericRangeFilter} is the filter equivalent of this
+       /// query.<p/>
+       /// 
+       /// <p/>You create a new NumericRangeQuery with the static
+	/// factory methods, e.g.:
+       /// 
+       /// <pre>
+       /// Query q = NumericRangeQuery.newFloatRange("weight",
+	/// new Float(0.03f), new Float(0.10f),
+	/// true, true);
+	/// </pre>
+	/// 
+	/// matches all documents whose float valued "weight" field
+	/// ranges from 0.03 to 0.10, inclusive.
+       /// 
+       /// <p/>The performance of NumericRangeQuery is much better
+       /// than the corresponding {@link TermRangeQuery} because the
+       /// number of terms that must be searched is usually far
+       /// fewer, thanks to trie indexing, described below.<p/>
+       /// 
+       /// <p/>You can optionally specify a <a
+       /// href="#precisionStepDesc"><code>precisionStep</code></a>
+       /// when creating this query.  This is necessary if you've
+       /// changed this configuration from its default (4) during
+       /// indexing.  Lower values consume more disk space but speed
+       /// up searching.  Suitable values are between <b>1</b> and
+       /// <b>8</b>. A good starting point to test is <b>4</b>,
+       /// which is the default value for all <code>Numeric*</code>
+       /// classes.  See <a href="#precisionStepDesc">below</a> for
+       /// details.
+       /// 
+       /// <p/>This query defaults to {@linkplain
+       /// MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT} for
+       /// 32 bit (int/float) ranges with precisionStep &lt;8 and 64
+       /// bit (long/double) ranges with precisionStep &lt;6.
+       /// Otherwise it uses {@linkplain
+       /// MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE} as the
+       /// number of terms is likely to be high.  With precision
+       /// steps of &lt;4, this query can be run with one of the
+       /// BooleanQuery rewrite methods without changing
+       /// BooleanQuery's default max clause count.
+       /// 
+       /// <p/><font color="red"><b>NOTE:</b> This API is experimental and
+       /// might change in incompatible ways in the next release.</font>
+       /// 
+       /// <br/><h3>How it works</h3>
+       /// 
+       /// <p/>See the publication about <a target="_blank" href="http://www.panfmp.org">panFMP</a>,
+       /// where this algorithm was described (referred to as <code>TrieRangeQuery</code>):
+       /// 
+       /// <blockquote><strong>Schindler, U, Diepenbroek, M</strong>, 2008.
+       /// <em>Generic XML-based Framework for Metadata Portals.</em>
+       /// Computers &amp; Geosciences 34 (12), 1947-1955.
+       /// <a href="http://dx.doi.org/10.1016/j.cageo.2008.02.023"
+       /// target="_blank">doi:10.1016/j.cageo.2008.02.023</a></blockquote>
+       /// 
+       /// <p/><em>A quote from this paper:</em> Because Apache Lucene is a full-text
+       /// search engine and not a conventional database, it cannot handle numerical ranges
+       /// (e.g., field value is inside user defined bounds, even dates are numerical values).
+       /// We have developed an extension to Apache Lucene that stores
+       /// the numerical values in a special string-encoded format with variable precision
+       /// (all numerical values like doubles, longs, floats, and ints are converted to
+       /// lexicographic sortable string representations and stored with different precisions
+       /// (for a more detailed description of how the values are stored,
+	/// see {@link NumericUtils})). A range is then divided recursively into multiple intervals for searching:
+       /// The center of the range is searched only with the lowest possible precision in the <em>trie</em>,
+       /// while the boundaries are matched more exactly. This reduces the number of terms dramatically.<p/>
+       /// 
+       /// <p/>For the variant that stores long values in 8 different precisions (each reduced by 8 bits) that
+       /// uses a lowest precision of 1 byte, the index contains only a maximum of 256 distinct values in the
+       /// lowest precision. Overall, a range could consist of a theoretical maximum of
+       /// <code>7*255*2 + 255 = 3825</code> distinct terms (when there is a term for every distinct value of an
+       /// 8-byte-number in the index and the range covers almost all of them; a maximum of 255 distinct values is used
+       /// because it would always be possible to reduce the full 256 values to one term with degraded precision).
+       /// In practice, we have seen up to 300 terms in most cases (index with 500,000 metadata records
+       /// and a uniform value distribution).<p/>
+       /// 
+       /// <a name="precisionStepDesc"/><h3>Precision Step</h3>
+       /// <p/>You can choose any <code>precisionStep</code> when encoding values.
+	/// Lower step values mean more precision levels and therefore more terms in the index (the index gets larger).
+	/// On the other hand, the maximum number of terms to match is reduced, which speeds up query execution.
+       /// The formula to calculate the maximum term count is:
+       /// <pre>
+       /// n = [ (bitsPerValue/precisionStep - 1) * (2^precisionStep - 1 ) * 2 ] + (2^precisionStep - 1 )
+       /// </pre>
+	/// <p/><em>(this formula is only correct when <code>bitsPerValue/precisionStep</code> is an integer;
+	/// otherwise the value must be rounded up, and the last summand must use the remainder of the division
+	/// as its precision step)</em>.
+       /// For longs stored using a precision step of 4, <code>n = 15*15*2 + 15 = 465</code>, and for a precision
+	/// step of 2, <code>n = 31*3*2 + 3 = 189</code>. But the gain in search speed is offset by more seeking
+	/// in the term enum of the index. Because of this, the ideal <code>precisionStep</code> value can only
+	/// be determined by testing. <b>Important:</b> You can index with a lower precision step value and test search speed
+       /// using a multiple of the original step value.<p/>
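+	/// (For example, applying the formula to 32 bit values at the default precision step of 4
+	/// gives <code>n = 7*15*2 + 15 = 225</code>.)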
+       /// 
+	/// <p/>Good values for <code>precisionStep</code> depend on usage and data type:
+	/// <ul>
+	/// <li>The default for all data types is <b>4</b>, which is used when no <code>precisionStep</code> is given.</li>
+	/// <li>Ideal value in most cases for <em>64 bit</em> data types <em>(long, double)</em> is <b>6</b> or <b>8</b>.</li>
+	/// <li>Ideal value in most cases for <em>32 bit</em> data types <em>(int, float)</em> is <b>4</b>.</li>
+	/// <li>Steps <b>&gt;64</b> for <em>long/double</em> and <b>&gt;32</b> for <em>int/float</em> produce one token
+	/// per value in the index, and querying is as slow as a conventional {@link TermRangeQuery}. But this can be used
+	/// to produce fields that are used solely for sorting (in this case simply use {@link Integer#MAX_VALUE} as
+       /// <code>precisionStep</code>). Using {@link NumericField NumericFields} for sorting
+       /// is ideal, because building the field cache is much faster than with text-only numbers.
+       /// Sorting is also possible with range query optimized fields using one of the above <code>precisionSteps</code>.</li>
+       /// </ul>
+       /// 
+       /// <p/>Comparisons of the different types of RangeQueries on an index with about 500,000 docs showed
+       /// that {@link TermRangeQuery} in boolean rewrite mode (with raised {@link BooleanQuery} clause count)
+       /// took about 30-40 secs to complete, {@link TermRangeQuery} in constant score filter rewrite mode took 5 secs
+       /// and executing this class took &lt;100ms to complete (on an Opteron64 machine, Java 1.5, 8 bit
+       /// precision step). This query type was developed for a geographic portal, where the performance for
+       /// e.g. bounding boxes or exact date/time stamps is important.<p/>
+       /// 
+       /// </summary>
+       /// <since> 2.9
+       /// 
+       /// </since>
+       [Serializable]
+       public sealed class NumericRangeQuery:MultiTermQuery
+       {
+               
+               private NumericRangeQuery(System.String field, int precisionStep, int valSize, System.ValueType min, System.ValueType max, bool minInclusive, bool maxInclusive)
+               {
+                       System.Diagnostics.Debug.Assert((valSize == 32 || valSize == 64));
+                       if (precisionStep < 1)
+                               throw new System.ArgumentException("precisionStep must be >=1");
+                       this.field = StringHelper.Intern(field);
+                       this.precisionStep = precisionStep;
+                       this.valSize = valSize;
+                       this.min = min;
+                       this.max = max;
+                       this.minInclusive = minInclusive;
+                       this.maxInclusive = maxInclusive;
+                       
+			// For bigger precisionSteps this query likely hits too many terms,
+			// so default to CONSTANT_SCORE_FILTER_REWRITE right away (especially as
+			// the FilteredTermEnum is costly when created only for the AUTO heuristic,
+			// because it creates new enums from the IndexReader for each sub-range)
+                       switch (valSize)
+                       {
+                               
+                               case 64: 
+                                       SetRewriteMethod((precisionStep > 6)?CONSTANT_SCORE_FILTER_REWRITE:CONSTANT_SCORE_AUTO_REWRITE_DEFAULT);
+                                       break;
+                               
+                               case 32: 
+                                       SetRewriteMethod((precisionStep > 8)?CONSTANT_SCORE_FILTER_REWRITE:CONSTANT_SCORE_AUTO_REWRITE_DEFAULT);
+                                       break;
+                               
+                               default: 
+                                       // should never happen
+                                       throw new System.ArgumentException("valSize must be 32 or 64");
+                               
+                       }
+                       
+                       // shortcut if upper bound == lower bound
+                       if (min != null && min.Equals(max))
+                       {
+                               SetRewriteMethod(CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
+                       }
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeQuery</code> that queries a <code>long</code>
+		/// range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the query
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeQuery NewLongRange(System.String field, int precisionStep, System.ValueType min, System.ValueType max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeQuery(field, precisionStep, 64, min, max, minInclusive, maxInclusive);
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeQuery</code> that queries a <code>long</code>
+		/// range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4).
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the query
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeQuery NewLongRange(System.String field, System.ValueType min, System.ValueType max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeQuery(field, NumericUtils.PRECISION_STEP_DEFAULT, 64, min, max, minInclusive, maxInclusive);
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeQuery</code> that queries an <code>int</code>
+		/// range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the query
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeQuery NewIntRange(System.String field, int precisionStep, System.ValueType min, System.ValueType max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeQuery(field, precisionStep, 32, min, max, minInclusive, maxInclusive);
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeQuery</code> that queries an <code>int</code>
+		/// range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4).
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the query
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeQuery NewIntRange(System.String field, System.ValueType min, System.ValueType max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeQuery(field, NumericUtils.PRECISION_STEP_DEFAULT, 32, min, max, minInclusive, maxInclusive);
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeQuery</code> that queries a <code>double</code>
+		/// range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the query
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeQuery NewDoubleRange(System.String field, int precisionStep, System.Double min, System.Double max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeQuery(field, precisionStep, 64, min, max, minInclusive, maxInclusive);
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeQuery</code> that queries a <code>double</code>
+		/// range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4).
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the query
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeQuery NewDoubleRange(System.String field, System.Double min, System.Double max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeQuery(field, NumericUtils.PRECISION_STEP_DEFAULT, 64, min, max, minInclusive, maxInclusive);
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeQuery</code> that queries a <code>float</code>
+		/// range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the query
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeQuery NewFloatRange(System.String field, int precisionStep, System.Single min, System.Single max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeQuery(field, precisionStep, 32, min, max, minInclusive, maxInclusive);
+               }
+               
+		/// <summary> Factory that creates a <code>NumericRangeQuery</code> that queries a <code>float</code>
+		/// range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4).
+		/// You can have half-open ranges (which are in fact &lt;/&#8804; or &gt;/&#8805; queries)
+		/// by setting the min or max value to <code>null</code>. With inclusive set to false the query
+		/// matches all documents excluding the bounds; with inclusive set to true the bounds are hits, too.
+               /// </summary>
+               public static NumericRangeQuery NewFloatRange(System.String field, System.Single min, System.Single max, bool minInclusive, bool maxInclusive)
+               {
+                       return new NumericRangeQuery(field, NumericUtils.PRECISION_STEP_DEFAULT, 32, min, max, minInclusive, maxInclusive);
+               }
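+		
+		// A minimal usage sketch, assuming an int field "year" indexed with
+		// NumericField; matches 2000 <= year < 2010 at the default precision step:
+		//
+		//     Query q = NumericRangeQuery.NewIntRange("year", 2000, 2010, true, false);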
+               
+               //@Override
+               public /*protected internal*/ override FilteredTermEnum GetEnum(IndexReader reader)
+               {
+                       return new NumericRangeTermEnum(this, reader);
+               }
+               
+               /// <summary>Returns the field name for this query </summary>
+               public System.String GetField()
+               {
+                       return field;
+               }
+               
+               /// <summary>Returns <code>true</code> if the lower endpoint is inclusive </summary>
+               public bool IncludesMin()
+               {
+                       return minInclusive;
+               }
+               
+               /// <summary>Returns <code>true</code> if the upper endpoint is inclusive </summary>
+               public bool IncludesMax()
+               {
+                       return maxInclusive;
+               }
+               
+               /// <summary>Returns the lower value of this range query </summary>
+               public System.ValueType GetMin()
+               {
+                       return min;
+               }
+               
+               /// <summary>Returns the upper value of this range query </summary>
+               public System.ValueType GetMax()
+               {
+                       return max;
+               }
+               
+               //@Override
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder sb = new System.Text.StringBuilder();
+                       if (!this.field.Equals(field))
+                               sb.Append(this.field).Append(':');
+                       return sb.Append(minInclusive?'[':'{').Append((min == null)?"*":min.ToString()).Append(" TO ").Append((max == null)?"*":max.ToString()).Append(maxInclusive?']':'}').Append(ToStringUtils.Boost(GetBoost())).ToString();
+               }
+               
+               //@Override
+               public  override bool Equals(System.Object o)
+               {
+                       if (o == this)
+                               return true;
+                       if (!base.Equals(o))
+                               return false;
+                       if (o is NumericRangeQuery)
+                       {
+                               NumericRangeQuery q = (NumericRangeQuery) o;
+                               return ((System.Object) field == (System.Object) q.field && (q.min == null?min == null:q.min.Equals(min)) && (q.max == null?max == null:q.max.Equals(max)) && minInclusive == q.minInclusive && maxInclusive == q.maxInclusive && precisionStep == q.precisionStep);
+                       }
+                       return false;
+               }
+               
+               //@Override
+               public override int GetHashCode()
+               {
+                       int hash = base.GetHashCode();
+                       hash += (field.GetHashCode() ^ 0x4565fd66 + precisionStep ^ 0x64365465);
+                       if (min != null)
+                               hash += (min.GetHashCode() ^ 0x14fa55fb);
+                       if (max != null)
+                               hash += (max.GetHashCode() ^ 0x733fa5fe);
+                       return hash + (minInclusive.GetHashCode() ^ 0x14fa55fb) + (maxInclusive.GetHashCode() ^ 0x733fa5fe);
+               }
+
+         // field must be interned after reading from stream
+        //private void ReadObject(java.io.ObjectInputStream in) 
+        //{
+        //    in.defaultReadObject();
+        //    field = StringHelper.intern(field);
+        //}
+
+
+        [System.Runtime.Serialization.OnDeserialized]
+        internal void OnDeserialized(System.Runtime.Serialization.StreamingContext context)
+        {
+            field = StringHelper.Intern(field);
+        }
+               
+               // members (package private, to be also fast accessible by NumericRangeTermEnum)
+               internal System.String field;
+               internal int precisionStep;
+               internal int valSize;
+               internal System.ValueType min;
+               internal System.ValueType max;
+               internal bool minInclusive;
+               internal bool maxInclusive;
+               
+               /// <summary> Subclass of FilteredTermEnum for enumerating all terms that match the
+               /// sub-ranges for trie range queries.
+               /// <p/>
+               /// WARNING: This term enumeration is not guaranteed to be always ordered by
+               /// {@link Term#compareTo}.
+               /// The ordering depends on how {@link NumericUtils#splitLongRange} and
+		/// {@link NumericUtils#splitIntRange} generate the sub-ranges. For
+               /// {@link MultiTermQuery} ordering is not relevant.
+               /// </summary>
+               private sealed class NumericRangeTermEnum:FilteredTermEnum
+               {
+                       private class AnonymousClassLongRangeBuilder:NumericUtils.LongRangeBuilder
+                       {
+                               public AnonymousClassLongRangeBuilder(NumericRangeTermEnum enclosingInstance)
+                               {
+                                       InitBlock(enclosingInstance);
+                               }
+                               private void  InitBlock(NumericRangeTermEnum enclosingInstance)
+                               {
+                                       this.enclosingInstance = enclosingInstance;
+                               }
+                               private NumericRangeTermEnum enclosingInstance;
+                               public NumericRangeTermEnum Enclosing_Instance
+                               {
+                                       get
+                                       {
+                                               return enclosingInstance;
+                                       }
+                                       
+                               }
+                               //@Override
+                               public override void  AddRange(System.String minPrefixCoded, System.String maxPrefixCoded)
+                               {
+                                       Enclosing_Instance.rangeBounds.Add(minPrefixCoded);
+                                       Enclosing_Instance.rangeBounds.Add(maxPrefixCoded);
+                               }
+                       }
+                       private class AnonymousClassIntRangeBuilder:NumericUtils.IntRangeBuilder
+                       {
+                               public AnonymousClassIntRangeBuilder(NumericRangeTermEnum enclosingInstance)
+                               {
+                                       InitBlock(enclosingInstance);
+                               }
+                               private void  InitBlock(NumericRangeTermEnum enclosingInstance)
+                               {
+                                       this.enclosingInstance = enclosingInstance;
+                               }
+                               private NumericRangeTermEnum enclosingInstance;
+                               public NumericRangeTermEnum Enclosing_Instance
+                               {
+                                       get
+                                       {
+                                               return enclosingInstance;
+                                       }
+                                       
+                               }
+                               //@Override
+                               public override void  AddRange(System.String minPrefixCoded, System.String maxPrefixCoded)
+                               {
+                                       Enclosing_Instance.rangeBounds.Add(minPrefixCoded);
+                                       Enclosing_Instance.rangeBounds.Add(maxPrefixCoded);
+                               }
+                       }
+                       private void  InitBlock(NumericRangeQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private NumericRangeQuery enclosingInstance;
+                       public NumericRangeQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       
+                       private IndexReader reader;
+                       private System.Collections.ArrayList rangeBounds = new System.Collections.ArrayList();
+                       private System.String currentUpperBound = null;
+                       
+                       internal NumericRangeTermEnum(NumericRangeQuery enclosingInstance, IndexReader reader)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.reader = reader;
+                               
+                               switch (Enclosing_Instance.valSize)
+                               {
+                                       
+                                       case 64:  {
+                                                       // lower
+                                                       long minBound = System.Int64.MinValue;
+                                                       if (Enclosing_Instance.min is System.Int64)
+                                                       {
+                                                               minBound = System.Convert.ToInt64(Enclosing_Instance.min);
+                                                       }
+                                                       else if (Enclosing_Instance.min is System.Double)
+                                                       {
+                                                               minBound = NumericUtils.DoubleToSortableLong(System.Convert.ToDouble(Enclosing_Instance.min));
+                                                       }
+                                                       if (!Enclosing_Instance.minInclusive && Enclosing_Instance.min != null)
+                                                       {
+                                                               if (minBound == System.Int64.MaxValue)
+                                                                       break;
+                                                               minBound++;
+                                                       }
+                                                       
+                                                       // upper
+                                                       long maxBound = System.Int64.MaxValue;
+                                                       if (Enclosing_Instance.max is System.Int64)
+                                                       {
+                                                               maxBound = System.Convert.ToInt64(Enclosing_Instance.max);
+                                                       }
+                                                       else if (Enclosing_Instance.max is System.Double)
+                                                       {
+                                                               maxBound = NumericUtils.DoubleToSortableLong(System.Convert.ToDouble(Enclosing_Instance.max));
+                                                       }
+                                                       if (!Enclosing_Instance.maxInclusive && Enclosing_Instance.max != null)
+                                                       {
+                                                               if (maxBound == System.Int64.MinValue)
+                                                                       break;
+                                                               maxBound--;
+                                                       }
+                                                       
+                                                       NumericUtils.SplitLongRange(new AnonymousClassLongRangeBuilder(this), Enclosing_Instance.precisionStep, minBound, maxBound);
+                                                       break;
+                                               }
+                                       
+                                       
+                                       case 32:  {
+                                                       // lower
+                                                       int minBound = System.Int32.MinValue;
+                                                       if (Enclosing_Instance.min is System.Int32)
+                                                       {
+                                                               minBound = System.Convert.ToInt32(Enclosing_Instance.min);
+                                                       }
+                                                       else if (Enclosing_Instance.min is System.Single)
+                                                       {
+                                                               minBound = NumericUtils.FloatToSortableInt(System.Convert.ToSingle(Enclosing_Instance.min));
+                                                       }
+                                                       if (!Enclosing_Instance.minInclusive && Enclosing_Instance.min != null)
+                                                       {
+                                                               if (minBound == System.Int32.MaxValue)
+                                                                       break;
+                                                               minBound++;
+                                                       }
+                                                       
+                                                       // upper
+                                                       int maxBound = System.Int32.MaxValue;
+                                                       if (Enclosing_Instance.max is System.Int32)
+                                                       {
+                                                               maxBound = System.Convert.ToInt32(Enclosing_Instance.max);
+                                                       }
+                                                       else if (Enclosing_Instance.max is System.Single)
+                                                       {
+                                                               maxBound = NumericUtils.FloatToSortableInt(System.Convert.ToSingle(Enclosing_Instance.max));
+                                                       }
+                                                       if (!Enclosing_Instance.maxInclusive && Enclosing_Instance.max != null)
+                                                       {
+                                                               if (maxBound == System.Int32.MinValue)
+                                                                       break;
+                                                               maxBound--;
+                                                       }
+                                                       
+                                                       NumericUtils.SplitIntRange(new AnonymousClassIntRangeBuilder(this), Enclosing_Instance.precisionStep, minBound, maxBound);
+                                                       break;
+                                               }
+                                       
+                                       
+                                       default: 
+                                               // should never happen
+                                               throw new System.ArgumentException("valSize must be 32 or 64");
+                                       
+                               }
+                               
+                               // seek to first term
+                               Next();
+                       }
+                       
+                       //@Override
+                       public override float Difference()
+                       {
+                               return 1.0f;
+                       }
+                       
+			/// <summary>This is a dummy; it is not used by this class. </summary>
+                       //@Override
+                       public override bool EndEnum()
+                       {
+                               System.Diagnostics.Debug.Assert(false); // should never be called
+                               return (currentTerm != null);
+                       }
+                       
+			/// <summary> Checks whether the current upper bound is reached;
+			/// this also updates the term count for statistics.
+                       /// In contrast to {@link FilteredTermEnum}, a return value
+                       /// of <code>false</code> ends iterating the current enum
+                       /// and forwards to the next sub-range.
+                       /// </summary>
+                       //@Override
+                       public /*protected internal*/ override bool TermCompare(Term term)
+                       {
+                               return ((System.Object) term.Field() == (System.Object) Enclosing_Instance.field && String.CompareOrdinal(term.Text(), currentUpperBound) <= 0);
+                       }
+                       
+                       /// <summary>Increments the enumeration to the next element.  True if one exists. </summary>
+                       //@Override
+                       public override bool Next()
+                       {
+                               // if a current term exists, the actual enum is initialized:
+                               // try change to next term, if no such term exists, fall-through
+                               if (currentTerm != null)
+                               {
+                                       System.Diagnostics.Debug.Assert(actualEnum != null);
+                                       if (actualEnum.Next())
+                                       {
+                                               currentTerm = actualEnum.Term();
+                                               if (TermCompare(currentTerm))
+                                                       return true;
+                                       }
+                               }
+                               // if all above fails, we go forward to the next enum,
+                               // if one is available
+                               currentTerm = null;
+                               if (rangeBounds.Count < 2)
+                                       return false;
+                               // close the current enum and read next bounds
+                               if (actualEnum != null)
+                               {
+                                       actualEnum.Close();
+                                       actualEnum = null;
+                               }
+                               System.Object tempObject;
+                               tempObject = rangeBounds[0];
+                               rangeBounds.RemoveAt(0);
+                               System.String lowerBound = (System.String) tempObject;
+                               System.Object tempObject2;
+                               tempObject2 = rangeBounds[0];
+                               rangeBounds.RemoveAt(0);
+                               this.currentUpperBound = ((System.String) tempObject2);
+				// this call recursively uses Next() if no valid term is
+				// found in the next enum.
+                               // if this behavior is changed/modified in the superclass,
+                               // this enum will not work anymore!
+                               SetEnum(reader.Terms(new Term(Enclosing_Instance.field, lowerBound)));
+                               return (currentTerm != null);
+                       }
+                       
+                       /// <summary>Closes the enumeration to further activity, freeing resources.  </summary>
+                       //@Override
+                       public override void  Close()
+                       {
+                               rangeBounds.Clear();
+                               currentUpperBound = null;
+                               base.Close();
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Package.html b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Package.html
new file mode 100644 (file)
index 0000000..f4d2ad7
--- /dev/null
@@ -0,0 +1,389 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">\r
+<!--\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements.  See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License.  You may obtain a copy of the License at\r
+\r
+     http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+-->\r
+<html>\r
+<head>\r
+   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">\r
+</head>\r
+<body>\r
+Code to search indices.\r
+\r
+<h2>Table Of Contents</h2>\r
+<p>\r
+    <ol>\r
+        <li><a href = "#search">Search Basics</a></li>\r
+        <li><a href = "#query">The Query Classes</a></li>\r
+        <li><a href = "#scoring">Changing the Scoring</a></li>\r
+    </ol>\r
+</p>\r
+<a name = "search"></a>\r
+<h2>Search</h2>\r
+<p>\r
+Search over indices.\r
+\r
+Applications usually call {@link\r
+Lucene.Net.Search.Searcher#search(Query)} or {@link\r
+Lucene.Net.Search.Searcher#search(Query,Filter)}.\r
+\r
+    <!-- FILL IN MORE HERE -->   \r
+</p>\r
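+<p>For illustration, a minimal sketch of a search (assuming an existing index\r
+in <tt>directory</tt>; the field and term names are hypothetical):\r
+    <pre>\r
+        IndexSearcher searcher = new IndexSearcher(directory, true); // read-only\r
+        TopDocs top = searcher.Search(new TermQuery(new Term("contents", "lucene")), 10);\r
+    </pre>\r
+</p>\r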
+<a name = "query"></a>\r
+<h2>Query Classes</h2>\r
+<h4>\r
+    <a href = "TermQuery.html">TermQuery</a>\r
+</h4>\r
+\r
+<p>Of the various implementations of\r
+    <a href = "Query.html">Query</a>, the\r
+    <a href = "TermQuery.html">TermQuery</a>\r
+    is the easiest to understand and the most often used in applications. A <a\r
+        href="TermQuery.html">TermQuery</a> matches all the documents that contain the\r
+    specified\r
+    <a href = "index/Term.html">Term</a>,\r
+    which is a word that occurs in a certain\r
+    <a href = "document/Field.html">Field</a>.\r
+    Thus, a <a href = "TermQuery.html">TermQuery</a> identifies and scores all\r
+    <a href = "document/Document.html">Document</a>s that have a <a\r
+        href="../document/Field.html">Field</a> with the specified string in it.\r
+    Constructing a <a\r
+        href="TermQuery.html">TermQuery</a>\r
+    is as simple as:\r
+    <pre>\r
+        TermQuery tq = new TermQuery(new Term("fieldName", "term"));\r
+    </pre>In this example, the <a href = "Query.html">Query</a> identifies all <a\r
+        href="../document/Document.html">Document</a>s that have the <a\r
+        href="../document/Field.html">Field</a> named <tt>"fieldName"</tt>\r
+    containing the word <tt>"term"</tt>.\r
+</p>\r
+<h4>\r
+    <a href = "BooleanQuery.html">BooleanQuery</a>\r
+</h4>\r
+\r
+<p>Things start to get interesting when one combines multiple\r
+    <a href = "TermQuery.html">TermQuery</a> instances into a <a\r
+        href="BooleanQuery.html">BooleanQuery</a>.\r
+    A <a href = "BooleanQuery.html">BooleanQuery</a> contains multiple\r
+    <a href = "BooleanClause.html">BooleanClause</a>s,\r
+    where each clause contains a sub-query (<a href = "Query.html">Query</a>\r
+    instance) and an operator (from <a\r
+        href="BooleanClause.Occur.html">BooleanClause.Occur</a>)\r
+    describing how that sub-query is combined with the other clauses:\r
+    <ol>\r
+\r
+        <li><p>SHOULD &mdash; Use this operator when a clause can occur in the result set, but is not required.\r
+            If a query is made up of all SHOULD clauses, then every document in the result\r
+            set matches at least one of these clauses.</p></li>\r
+\r
+        <li><p>MUST &mdash; Use this operator when a clause is required to occur in the result set. Every\r
+            document in the result set will match\r
+            all such clauses.</p></li>\r
+\r
+        <li><p>MUST NOT &mdash; Use this operator when a\r
+            clause must not occur in the result set. No\r
+            document in the result set will match\r
+            any such clauses.</p></li>\r
+    </ol>\r
+    Boolean queries are constructed by adding two or more\r
+    <a href = "BooleanClause.html">BooleanClause</a>\r
+    instances. If too many clauses are added, a <a href = "BooleanQuery.TooManyClauses.html">TooManyClauses</a>\r
+    exception will be thrown during searching. This most often occurs\r
+    when a <a href = "Query.html">Query</a>\r
+    is rewritten into a <a href = "BooleanQuery.html">BooleanQuery</a> with many\r
+    <a href = "TermQuery.html">TermQuery</a> clauses,\r
+    for example by <a href = "WildcardQuery.html">WildcardQuery</a>.\r
+    The default setting for the maximum number\r
+    of clauses is 1024, but this can be changed via the\r
+    static method <a href = "BooleanQuery.html#setMaxClauseCount(int)">setMaxClauseCount</a>\r
+    in <a href = "BooleanQuery.html">BooleanQuery</a>.\r
+</p>\r
+\r
+<h4>Phrases</h4>\r
+\r
+<p>Another common search is to find documents containing certain phrases. This\r
+    is handled two different ways:\r
+    <ol>\r
+        <li>\r
+            <p><a href = "PhraseQuery.html">PhraseQuery</a>\r
+                &mdash; Matches a sequence of\r
+                <a href = "index/Term.html">Terms</a>.\r
+                <a href = "PhraseQuery.html">PhraseQuery</a> uses a slop factor to determine\r
+                how many positions may occur between any two terms in the phrase and still be considered a match (see the sketch after this list).</p>\r
+        </li>\r
+        <li>\r
+            <p><a href = "spans/SpanNearQuery.html">SpanNearQuery</a>\r
+                &mdash; Matches a sequence of other\r
+                <a href = "spans/SpanQuery.html">SpanQuery</a>\r
+                instances. <a href = "spans/SpanNearQuery.html">SpanNearQuery</a> allows for\r
+                much more\r
+                complicated phrase queries since it is constructed from other <a\r
+                    href="spans/SpanQuery.html">SpanQuery</a>\r
+                instances, instead of only <a href = "TermQuery.html">TermQuery</a>\r
+                instances.</p>\r
+        </li>\r
+    </ol>\r
+</p>\r
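+<p>A sketch of the PhraseQuery case above (field and terms are hypothetical;\r
+a slop of 1 permits one intervening position between the terms):\r
+    <pre>\r
+        PhraseQuery pq = new PhraseQuery();\r
+        pq.Add(new Term("contents", "open"));\r
+        pq.Add(new Term("contents", "source"));\r
+        pq.SetSlop(1);\r
+    </pre>\r
+</p>\r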
+\r
+<h4>\r
+    <a href = "TermRangeQuery.html">TermRangeQuery</a>\r
+</h4>\r
+\r
+<p>The\r
+    <a href = "TermRangeQuery.html">TermRangeQuery</a>\r
+    matches all documents containing terms within the\r
+    exclusive range between a lower\r
+    <a href = "index/Term.html">Term</a>\r
+    and an upper\r
+    <a href = "index/Term.html">Term</a>,\r
+    according to {@link java.lang.String#compareTo(String)}. It is not intended\r
+    for numerical ranges; use <a href = "NumericRangeQuery.html">NumericRangeQuery</a> instead.\r
+\r
+    For example, one could find all documents\r
+    that have terms beginning with the letters <tt>a</tt> through <tt>c</tt>. This type of <a\r
+        href="Query.html">Query</a> is frequently used to\r
+    find\r
+    documents that occur in a specific date range.\r
+</p>\r
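+<p>For instance, a sketch matching terms from "a" (inclusive) up to "c"\r
+(exclusive) in a hypothetical field:\r
+    <pre>\r
+        TermRangeQuery trq = new TermRangeQuery("name", "a", "c", true, false);\r
+    </pre>\r
+</p>\r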
+\r
+<h4>\r
+    <a href = "NumericRangeQuery.html">NumericRangeQuery</a>\r
+</h4>\r
+\r
+<p>The\r
+    <a href = "NumericRangeQuery.html">NumericRangeQuery</a>\r
+    matches all documents whose indexed values fall within a numeric range.\r
+    For NumericRangeQuery to work, you must index the values\r
+    using a special <a href = "document/NumericField.html">\r
+    NumericField</a>.\r
+</p>\r
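+<p>For example, a float range over a hypothetical "price" field, both\r
+endpoints inclusive:\r
+    <pre>\r
+        NumericRangeQuery nrq = NumericRangeQuery.NewFloatRange("price", 10.0f, 20.0f, true, true);\r
+    </pre>\r
+</p>\r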
+\r
+<h4>\r
+    <a href = "PrefixQuery.html">PrefixQuery</a>,\r
+    <a href = "WildcardQuery.html">WildcardQuery</a>\r
+</h4>\r
+\r
+<p>While the\r
+    <a href = "PrefixQuery.html">PrefixQuery</a>\r
+    has a different implementation, it is essentially a special case of the\r
+    <a href = "WildcardQuery.html">WildcardQuery</a>.\r
+    The <a href = "PrefixQuery.html">PrefixQuery</a> allows an application\r
+    to identify all documents with terms that begin with a certain string. The <a\r
+        href="WildcardQuery.html">WildcardQuery</a> generalizes this by allowing\r
+    for the use of <tt>*</tt> (matches 0 or more characters) and <tt>?</tt> (matches exactly one character) wildcards.\r
+    Note that the <a href = "WildcardQuery.html">WildcardQuery</a> can be quite slow. Also\r
+    note that\r
+    <a href = "WildcardQuery.html">WildcardQuery</a> should\r
+    not start with <tt>*</tt> and <tt>?</tt>, as these are extremely slow. \r
+       To remove this protection and allow a wildcard at the beginning of a term, see method\r
+       <a href = "queryParser/QueryParser.html#setAllowLeadingWildcard(boolean)">setAllowLeadingWildcard</a> in \r
+       <a href = "queryParser/QueryParser.html">QueryParser</a>.\r
+</p>\r
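+<p>A brief sketch of both (field and patterns are hypothetical):\r
+    <pre>\r
+        PrefixQuery prefix = new PrefixQuery(new Term("name", "luc"));\r
+        WildcardQuery wild = new WildcardQuery(new Term("name", "lu*e?e"));\r
+    </pre>\r
+</p>\r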
+<h4>\r
+    <a href = "FuzzyQuery.html">FuzzyQuery</a>\r
+</h4>\r
+\r
+<p>A\r
+    <a href = "FuzzyQuery.html">FuzzyQuery</a>\r
+    matches documents that contain terms similar to the specified term. Similarity is\r
+    determined using\r
+    <a href = "http://en.wikipedia.org//wiki/Levenshtein">Levenshtein (edit) distance</a>.\r
+    This type of query can be useful when accounting for spelling variations in the collection.\r
+</p>\r
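+<p>For example, a sketch matching terms similar to "lucene" with a minimum\r
+similarity of 0.6 (the field name is hypothetical):\r
+    <pre>\r
+        FuzzyQuery fq = new FuzzyQuery(new Term("contents", "lucene"), 0.6f);\r
+    </pre>\r
+</p>\r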
+<a name = "changingSimilarity"></a>\r
+<h2>Changing Similarity</h2>\r
+\r
+<p>Chances are <a href = "DefaultSimilarity.html">DefaultSimilarity</a> is sufficient for all\r
+    your searching needs.\r
+    However, in some applications it may be necessary to customize your <a\r
+        href="Similarity.html">Similarity</a> implementation. For instance, some\r
+    applications do not need to\r
+    distinguish between shorter and longer documents (see <a\r
+        href="http://www.gossamer-threads.com/lists/lucene/java-user/38967#38967">a "fair" similarity</a>).</p>\r
+\r
+<p>To change <a href = "Similarity.html">Similarity</a>, one must do so for both indexing and\r
+    searching, and the changes must happen before\r
+    either of these actions take place. Although in theory there is nothing stopping you from changing mid-stream, it\r
+    just isn't well-defined what is going to happen.\r
+</p>\r
+\r
+<p>To make this change, implement your own <a href = "Similarity.html">Similarity</a> (likely\r
+    you'll want to simply subclass\r
+    <a href = "DefaultSimilarity.html">DefaultSimilarity</a>) and then use the new\r
+    class by calling\r
+    <a href = "index/IndexWriter.html#setSimilarity(Lucene.Net.Search.Similarity)">IndexWriter.setSimilarity</a>\r
+    before indexing and\r
+    <a href = "Searcher.html#setSimilarity(Lucene.Net.Search.Similarity)">Searcher.setSimilarity</a>\r
+    before searching.\r
+</p>\r
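+<p>A minimal sketch, assuming a flat term-frequency contribution is desired\r
+(the class name is hypothetical; <tt>writer</tt> and <tt>searcher</tt> are an\r
+existing IndexWriter and Searcher):\r
+    <pre>\r
+        class FlatTfSimilarity : DefaultSimilarity {\r
+            // every matching term contributes equally, regardless of frequency\r
+            public override float Tf(float freq) { return freq > 0 ? 1.0f : 0.0f; }\r
+        }\r
+        writer.SetSimilarity(new FlatTfSimilarity());   // before indexing\r
+        searcher.SetSimilarity(new FlatTfSimilarity()); // before searching\r
+    </pre>\r
+</p>\r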
+\r
+<p>\r
+    If you are interested in use cases for changing your similarity, see the Lucene users' mailing list at <a\r
+        href="http://www.nabble.com/Overriding-Similarity-tf2128934.html">Overriding Similarity</a>.\r
+    In summary, here are a few use cases:\r
+    <ol>\r
+        <li><p><a href = "api/org/apache/lucene/misc/SweetSpotSimilarity.html">SweetSpotSimilarity</a> &mdash; <a\r
+                href="api/org/apache/lucene/misc/SweetSpotSimilarity.html">SweetSpotSimilarity</a> gives small increases\r
+            as the frequency increases a small amount\r
+            and then greater increases when you hit the "sweet spot", i.e. where you think the frequency of terms is\r
+            more significant.</p></li>\r
+        <li><p>Overriding tf &mdash; In some applications, it doesn't matter what the score of a document is as long as a\r
+            matching term occurs. In these\r
+            cases people have overridden Similarity to return 1 from the tf() method.</p></li>\r
+        <li><p>Changing Length Normalization &mdash; By overriding <a\r
+                href="Similarity.html#lengthNorm(java.lang.String,%20int)">lengthNorm</a>,\r
+            it is possible to discount how the length of a field contributes\r
+            to a score. In <a href = "DefaultSimilarity.html">DefaultSimilarity</a>,\r
+            lengthNorm = 1 / (numTerms in field)^0.5, but if one changes this to be\r
+            1 / (numTerms in field), all fields will be treated\r
+            <a href = "http://www.gossamer-threads.com//lists/lucene/java-user/38967#38967">"fairly"</a>.</p></li>\r
+    </ol>\r
+    In general, Chris Hostetter sums it up best in saying (from <a\r
+        href="http://www.gossamer-threads.com/lists/lucene/java-user/39125#39125">the Lucene users's mailing list</a>):\r
+    <blockquote>[One would override the Similarity in] ... any situation where you know more about your data than\r
+        just that it's "text" is a situation where it *might* make sense to override your\r
+        Similarity method.</blockquote>\r
+</p>\r
+<a name = "scoring"></a>\r
+<h2>Changing Scoring &mdash; Expert Level</h2>\r
+\r
+<p>Changing scoring is an expert level task, so tread carefully and be prepared to share your code if\r
+    you want help.\r
+</p>\r
+\r
+<p>With the warning out of the way, it is possible to change a lot more than just the Similarity\r
+    when it comes to scoring in Lucene. Lucene's scoring is a complex mechanism that is grounded by\r
+    <span >three main classes</span>:\r
+    <ol>\r
+        <li>\r
+            <a href = "Query.html">Query</a> &mdash; The abstract object representation of the\r
+            user's information need.</li>\r
+        <li>\r
+            <a href = "Weight.html">Weight</a> &mdash; The internal interface representation of\r
+            the user's Query, so that Query objects may be reused.</li>\r
+        <li>\r
+            <a href = "Scorer.html">Scorer</a> &mdash; An abstract class containing common\r
+            functionality for scoring. Provides both scoring and explanation capabilities.</li>\r
+    </ol>\r
+    Details on each of these classes, and their children, can be found in the subsections below.\r
+</p>\r
+<h4>The Query Class</h4>\r
+    <p>In some sense, the\r
+        <a href = "Query.html">Query</a>\r
+        class is where it all begins. Without a Query, there would be\r
+        nothing to score. Furthermore, the Query class is the catalyst for the other scoring classes as it\r
+        is often responsible\r
+        for creating them or coordinating the functionality between them. The\r
+        <a href = "Query.html">Query</a> class has several methods that are important for\r
+        derived classes:\r
+        <ol>\r
+            <li>createWeight(Searcher searcher) &mdash; A\r
+                <a href = "Weight.html">Weight</a> is the internal representation of the\r
+                Query, so each Query implementation must\r
+                provide an implementation of Weight. See the subsection on <a\r
+                    href="#The Weight Interface">The Weight Interface</a> below for details on implementing the Weight\r
+                interface.</li>\r
+            <li>rewrite(IndexReader reader) &mdash; Rewrites queries into primitive queries. Primitive queries are:\r
+                <a href = "TermQuery.html">TermQuery</a>,\r
+                <a href = "BooleanQuery.html">BooleanQuery</a>, <span\r
+                    >and other queries that implement Query.html#createWeight(Searcher searcher)</span></li>\r
+        </ol>\r
+    </p>\r
+<h4>The Weight Interface</h4>\r
+    <p>The\r
+        <a href = "Weight.html">Weight</a>\r
+        interface provides an internal representation of the Query so that it can be reused. Any\r
+        <a href = "Searcher.html">Searcher</a>\r
+        dependent state should be stored in the Weight implementation,\r
+        not in the Query class. The interface defines six methods that must be implemented:\r
+        <ol>\r
+            <li>\r
+                <a href = "Weight.html#getQuery()">Weight#getQuery()</a> &mdash; Pointer to the\r
+                Query that this Weight represents.</li>\r
+            <li>\r
+                <a href = "Weight.html#getValue()">Weight#getValue()</a> &mdash; The weight for\r
+                this Query. For example, the TermQuery.TermWeight value is\r
+                equal to the idf^2 * boost * queryNorm <!-- DOUBLE CHECK THIS --></li>\r
+            <li>\r
+                <a href = "Weight.html#sumOfSquaredWeights()">\r
+                    Weight#sumOfSquaredWeights()</a> &mdash; The sum of squared weights. For TermQuery, this is (idf *\r
+                boost)^2</li>\r
+            <li>\r
+                <a href = "Weight.html#normalize(float)">\r
+                    Weight#normalize(float)</a> &mdash; Determine the query normalization factor. The query normalization may\r
+                allow for comparing scores between queries.</li>\r
+            <li>\r
+                <a href = "Weight.html#scorer(Lucene.Net.Index.IndexReader, boolean, boolean)">\r
+                    Weight#scorer(IndexReader, boolean, boolean)</a> &mdash; Construct a new\r
+                <a href = "Scorer.html">Scorer</a>\r
+                for this Weight. See\r
+                <a href = "#The Scorer Class">The Scorer Class</a>\r
+                below for help defining a Scorer. As the name implies, the\r
+                Scorer is responsible for doing the actual scoring of documents given the Query.\r
+            </li>\r
+            <li>\r
+                <a href = "Weight.html#explain(Lucene.Net.Search.Searcher, Lucene.Net.Index.IndexReader, int)">\r
+                    Weight#explain(Searcher, IndexReader, int)</a> &mdash; Provide a means for explaining why a given document was\r
+                scored\r
+                the way it was.</li>\r
+        </ol>\r
+    </p>\r
+<h4>The Scorer Class</h4>\r
+    <p>The\r
+        <a href = "Scorer.html">Scorer</a>\r
+        abstract class provides common scoring functionality for all Scorer implementations and\r
+        is the heart of the Lucene scoring process. The Scorer defines the following methods (not\r
+        yet abstract, but they will be in Lucene 3.0 and should be treated as abstract now), which\r
+        must be implemented (some of them inherited from <a href = "DocIdSetIterator.html">DocIdSetIterator</a>):\r
+        <ol>\r
+            <li>\r
+                <a href = "DocIdSetIterator.html#nextDoc()">DocIdSetIterator#nextDoc()</a> &mdash; Advances to the next\r
+                document that matches this Query, returning the id of the matching\r
+                document, or NO_MORE_DOCS when no further document matches.</li>\r
+            <li>\r
+                <a href = "DocIdSetIterator.html#docID()">DocIdSetIterator#docID()</a> &mdash; Returns the id of the\r
+                <a href = "document/Document.html">Document</a>\r
+                that contains the match. It is not valid until nextDoc() has been called at least once.\r
+            </li>\r
+            <li>\r
+                <a href = "Scorer.html#score(Lucene.Net.Search.Collector)">Scorer#score(Collector)</a> &mdash;\r
+                Scores and collects all matching documents using the given Collector.\r
+            </li>\r
+            <li>\r
+                <a href = "Scorer.html#score()">Scorer#score()</a> &mdash; Return the score of the\r
+                current document. This value can be determined in any\r
+                appropriate way for an application. For instance, the\r
+                <a href = "http://svn.apache.org//viewvc/lucene/java/trunk/src/java/org/apache/lucene/search/TermScorer.java?view=log">TermScorer</a>\r
+                returns the tf * Weight.getValue() * fieldNorm.\r
+            </li>\r
+            <li>\r
+                <a href = "DocIdSetIterator.html#advance(int)">DocIdSetIterator#advance(int)</a> &mdash; Skip ahead in\r
+                the document matches to the document whose id is greater than\r
+                or equal to the passed in value. In many instances, advance can be\r
+                implemented more efficiently than simply looping through all the matching documents until\r
+                the target document is identified.</li>\r
+        </ol>\r
+    </p>\r
+<h4>Why would I want to add my own Query?</h4>\r
+\r
+    <p>In a nutshell, you want to add your own custom Query implementation when you think that Lucene's\r
+        existing implementations aren't appropriate for the\r
+        task at hand. You might be doing some cutting-edge research, or you might need more information\r
+        back out of Lucene (similar to Doug adding SpanQuery functionality).</p>\r
+\r
+</body>\r
+</html>\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ParallelMultiSearcher.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ParallelMultiSearcher.cs
new file mode 100644 (file)
index 0000000..0c4679e
--- /dev/null
@@ -0,0 +1,346 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using PriorityQueue = Mono.Lucene.Net.Util.PriorityQueue;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Implements parallel search over a set of <code>Searchables</code>.
+       /// 
+       /// <p/>Applications usually need only call the inherited {@link #Search(Query)}
+       /// or {@link #Search(Query,Filter)} methods.
+       /// </summary>
+       public class ParallelMultiSearcher:MultiSearcher
+       {
+               private class AnonymousClassCollector1:Collector
+               {
+                       public AnonymousClassCollector1(Mono.Lucene.Net.Search.Collector collector, int start, ParallelMultiSearcher enclosingInstance)
+                       {
+                               InitBlock(collector, start, enclosingInstance);
+                       }
+                       private void  InitBlock(Mono.Lucene.Net.Search.Collector collector, int start, ParallelMultiSearcher enclosingInstance)
+                       {
+                               this.collector = collector;
+                               this.start = start;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private Mono.Lucene.Net.Search.Collector collector;
+                       private int start;
+                       private ParallelMultiSearcher enclosingInstance;
+                       public ParallelMultiSearcher Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       public override void  SetScorer(Scorer scorer)
+                       {
+                               collector.SetScorer(scorer);
+                       }
+                       public override void  Collect(int doc)
+                       {
+                               collector.Collect(doc);
+                       }
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               collector.SetNextReader(reader, start + docBase);
+                       }
+                       public override bool AcceptsDocsOutOfOrder()
+                       {
+                               return collector.AcceptsDocsOutOfOrder();
+                       }
+               }
+               
+               private Searchable[] searchables;
+               private int[] starts;
+               
+               /// <summary>Creates a searchable which searches <i>searchables</i>. </summary>
+               public ParallelMultiSearcher(params Searchable[] searchables):base(searchables)
+               {
+                       this.searchables = searchables;
+                       this.starts = GetStarts();
+               }
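+		// Illustrative usage sketch (dir1/dir2 are hypothetical Directory instances):
+		//
+		//   Searchable s1 = new IndexSearcher(dir1, true);
+		//   Searchable s2 = new IndexSearcher(dir2, true);
+		//   ParallelMultiSearcher pms = new ParallelMultiSearcher(s1, s2);
+		//   TopDocs top = pms.Search(new TermQuery(new Term("contents", "mono")), 10);
+		//
+		// Each underlying searcher is queried on its own thread and the hits are merged.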
+               
+               /// <summary> TODO: parallelize this one too</summary>
+               public override int DocFreq(Term term)
+               {
+                       return base.DocFreq(term);
+               }
+               
+		/// <summary> A search implementation which spawns a new thread for each
+		/// Searchable, waits for each search to complete and merges
+		/// the results back together.
+               /// </summary>
+               public override TopDocs Search(Weight weight, Filter filter, int nDocs)
+               {
+                       HitQueue hq = new HitQueue(nDocs, false);
+                       int totalHits = 0;
+                       MultiSearcherThread[] msta = new MultiSearcherThread[searchables.Length];
+                       for (int i = 0; i < searchables.Length; i++)
+                       {
+                               // search each searchable
+				// Assume there are not too many searchables and that the cost of creating a thread is far lower than that of a search
+                               msta[i] = new MultiSearcherThread(searchables[i], weight, filter, nDocs, hq, i, starts, "MultiSearcher thread #" + (i + 1));
+                               msta[i].Start();
+                       }
+                       
+                       for (int i = 0; i < searchables.Length; i++)
+                       {
+                               try
+                               {
+                                       msta[i].Join();
+                               }
+                               catch (System.Threading.ThreadInterruptedException ie)
+                               {
+                                       // In 3.0 we will change this to throw
+                                       // InterruptedException instead
+                                       SupportClass.ThreadClass.Current().Interrupt();
+                                       throw new System.SystemException(ie.Message, ie);
+                               }
+                               System.IO.IOException ioe = msta[i].GetIOException();
+                               if (ioe == null)
+                               {
+                                       totalHits += msta[i].Hits();
+                               }
+                               else
+                               {
+                                       // if one search produced an IOException, rethrow it
+                                       throw ioe;
+                               }
+                       }
+                       
+                       ScoreDoc[] scoreDocs = new ScoreDoc[hq.Size()];
+                       for (int i = hq.Size() - 1; i >= 0; i--)
+                       // put docs in array
+                               scoreDocs[i] = (ScoreDoc) hq.Pop();
+                       
+                       float maxScore = (totalHits == 0)?System.Single.NegativeInfinity:scoreDocs[0].score;
+                       
+                       return new TopDocs(totalHits, scoreDocs, maxScore);
+               }
+               
+		/// <summary> A search implementation allowing sorting which spawns a new thread for each
+               /// Searchable, waits for each search to complete and merges
+               /// the results back together.
+               /// </summary>
+               public override TopFieldDocs Search(Weight weight, Filter filter, int nDocs, Sort sort)
+               {
+                       // don't specify the fields - we'll wait to do this until we get results
+                       FieldDocSortedHitQueue hq = new FieldDocSortedHitQueue(null, nDocs);
+                       int totalHits = 0;
+                       MultiSearcherThread[] msta = new MultiSearcherThread[searchables.Length];
+                       for (int i = 0; i < searchables.Length; i++)
+                       {
+                               // search each searchable
+				// Assume there are not too many searchables and that the cost of creating a thread is far lower than that of a search
+                               msta[i] = new MultiSearcherThread(searchables[i], weight, filter, nDocs, hq, sort, i, starts, "MultiSearcher thread #" + (i + 1));
+                               msta[i].Start();
+                       }
+                       
+                       float maxScore = System.Single.NegativeInfinity;
+                       
+                       for (int i = 0; i < searchables.Length; i++)
+                       {
+                               try
+                               {
+                                       msta[i].Join();
+                               }
+                               catch (System.Threading.ThreadInterruptedException ie)
+                               {
+                                       // In 3.0 we will change this to throw
+                                       // InterruptedException instead
+                                       SupportClass.ThreadClass.Current().Interrupt();
+                                       throw new System.SystemException(ie.Message, ie);
+                               }
+                               System.IO.IOException ioe = msta[i].GetIOException();
+                               if (ioe == null)
+                               {
+                                       totalHits += msta[i].Hits();
+                                       maxScore = System.Math.Max(maxScore, msta[i].GetMaxScore());
+                               }
+                               else
+                               {
+                                       // if one search produced an IOException, rethrow it
+                                       throw ioe;
+                               }
+                       }
+                       
+                       ScoreDoc[] scoreDocs = new ScoreDoc[hq.Size()];
+                       for (int i = hq.Size() - 1; i >= 0; i--)
+                       // put docs in array
+                               scoreDocs[i] = (ScoreDoc) hq.Pop();
+                       
+                       return new TopFieldDocs(totalHits, scoreDocs, hq.GetFields(), maxScore);
+               }
+               
+               /// <summary>Lower-level search API.
+               /// 
+               /// <p/>{@link Collector#Collect(int)} is called for every matching document.
+               /// 
+               /// <p/>Applications should only use this if they need <i>all</i> of the
+               /// matching documents.  The high-level search API ({@link
+               /// Searcher#Search(Query)}) is usually more efficient, as it skips
+               /// non-high-scoring hits.
+               /// 
+               /// </summary>
+               /// <param name="weight">to match documents
+               /// </param>
+               /// <param name="filter">if non-null, a bitset used to eliminate some documents
+               /// </param>
+               /// <param name="collector">to receive hits
+               /// 
+               /// TODO: parallelize this one too
+               /// </param>
+               public override void  Search(Weight weight, Filter filter, Collector collector)
+               {
+                       for (int i = 0; i < searchables.Length; i++)
+                       {
+                               
+                               int start = starts[i];
+                               
+                               Collector hc = new AnonymousClassCollector1(collector, start, this);
+                               
+                               searchables[i].Search(weight, filter, hc);
+                       }
+               }
+               
+               /*
+               * TODO: this one could be parallelized too
+               * @see Mono.Lucene.Net.Search.Searchable#rewrite(Mono.Lucene.Net.Search.Query)
+               */
+               public override Query Rewrite(Query original)
+               {
+                       return base.Rewrite(original);
+               }
+       }
+       
+       /// <summary> A thread subclass for searching a single searchable </summary>
+       class MultiSearcherThread:SupportClass.ThreadClass
+       {
+               
+               private Searchable searchable;
+               private Weight weight;
+               private Filter filter;
+               private int nDocs;
+               private TopDocs docs;
+               private int i;
+               private PriorityQueue hq;
+               private int[] starts;
+               private System.Exception ioe;
+               private Sort sort;
+               
+               public MultiSearcherThread(Searchable searchable, Weight weight, Filter filter, int nDocs, HitQueue hq, int i, int[] starts, System.String name):base(name)
+               {
+                       this.searchable = searchable;
+                       this.weight = weight;
+                       this.filter = filter;
+                       this.nDocs = nDocs;
+                       this.hq = hq;
+                       this.i = i;
+                       this.starts = starts;
+               }
+               
+               public MultiSearcherThread(Searchable searchable, Weight weight, Filter filter, int nDocs, FieldDocSortedHitQueue hq, Sort sort, int i, int[] starts, System.String name):base(name)
+               {
+                       this.searchable = searchable;
+                       this.weight = weight;
+                       this.filter = filter;
+                       this.nDocs = nDocs;
+                       this.hq = hq;
+                       this.i = i;
+                       this.starts = starts;
+                       this.sort = sort;
+               }
+               
+               override public void  Run()
+               {
+                       try
+                       {
+                               docs = (sort == null)?searchable.Search(weight, filter, nDocs):searchable.Search(weight, filter, nDocs, sort);
+                       }
+                       // Store the IOException for later use by the caller of this thread
+                       catch (System.Exception e)
+                       {
+                               this.ioe = e;
+                       }
+                       if (this.ioe == null)
+                       {
+                               // if we are sorting by fields, we need to tell the field sorted hit queue
+                               // the actual type of fields, in case the original list contained AUTO.
+                               // if the searchable returns null for fields, we'll have problems.
+                               if (sort != null)
+                               {
+                                       TopFieldDocs docsFields = (TopFieldDocs) docs;
+                                       // If one of the Sort fields is FIELD_DOC, need to fix its values, so that
+                                       // it will break ties by doc Id properly. Otherwise, it will compare to
+                                       // 'relative' doc Ids, that belong to two different searchables.
+                                       for (int j = 0; j < docsFields.fields.Length; j++)
+                                       {
+                                               if (docsFields.fields[j].GetType() == SortField.DOC)
+                                               {
+                                                       // iterate over the score docs and change their fields value
+                                                       for (int j2 = 0; j2 < docs.ScoreDocs.Length; j2++)
+                                                       {
+                                                               FieldDoc fd = (FieldDoc) docs.ScoreDocs[j2];
+                                                               fd.fields[j] = (System.Int32) (((System.Int32) fd.fields[j]) + starts[i]);
+                                                       }
+                                                       break;
+                                               }
+                                       }
+                                       
+                                       ((FieldDocSortedHitQueue) hq).SetFields(docsFields.fields);
+                               }
+                               ScoreDoc[] scoreDocs = docs.ScoreDocs;
+                               for (int j = 0; j < scoreDocs.Length; j++)
+                               {
+                                       // merge scoreDocs into hq
+                                       ScoreDoc scoreDoc = scoreDocs[j];
+					scoreDoc.doc += starts[i]; // convert to the global doc id space
+					// it would be so nice if we had a thread-safe insert
+                                       lock (hq)
+                                       {
+                                               if (!hq.Insert(scoreDoc))
+                                                       break;
+                                       } // no more scores > minScore
+                               }
+                       }
+               }
+               
+               public virtual int Hits()
+               {
+                       return docs.TotalHits;
+               }
+               
+               public virtual float GetMaxScore()
+               {
+                       return docs.GetMaxScore();
+               }
+               
+               public virtual System.IO.IOException GetIOException()
+               {
+            if (ioe == null) return null;
+            return new System.IO.IOException(ioe.Message);
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/AveragePayloadFunction.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/AveragePayloadFunction.cs
new file mode 100644 (file)
index 0000000..bdf2fb7
--- /dev/null
@@ -0,0 +1,63 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search.Payloads
+{
+       
+       
+       /// <summary> Calculates the final score as the average score of all payloads seen.
+       /// <p/>
+       /// This class is thread safe and completely reusable.
+       /// </summary>
+       [Serializable]
+       public class AveragePayloadFunction:PayloadFunction
+       {
+               
+               public override float CurrentScore(int docId, System.String field, int start, int end, int numPayloadsSeen, float currentScore, float currentPayloadScore)
+               {
+                       return currentPayloadScore + currentScore;
+               }
+               
+               public override float DocScore(int docId, System.String field, int numPayloadsSeen, float payloadScore)
+               {
+                       return numPayloadsSeen > 0?(payloadScore / numPayloadsSeen):1;
+               }
+               
+               public override int GetHashCode()
+               {
+                       int prime = 31;
+                       int result = 1;
+                       result = prime * result + this.GetType().GetHashCode();
+                       return result;
+               }
+               
+               public  override bool Equals(System.Object obj)
+               {
+                       if (this == obj)
+                               return true;
+                       if (obj == null)
+                               return false;
+                       if (GetType() != obj.GetType())
+                               return false;
+                       return true;
+               }
+       }
+}
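
The two-phase contract is easiest to see in a short trace: CurrentScore accumulates, DocScore finalizes. The payload scores below are invented, and the docId/start/end arguments are dummies:

    var avg = new Mono.Lucene.Net.Search.Payloads.AveragePayloadFunction();
    float running = 0.0f;
    float[] payloadScores = { 2.0f, 4.0f, 6.0f };  // made-up per-occurrence scores
    for (int n = 0; n < payloadScores.Length; n++)
    {
        // CurrentScore adds each payload score: running becomes 2, then 6, then 12
        running = avg.CurrentScore(0, "body", 0, 1, n, running, payloadScores[n]);
    }
    // DocScore divides by the count: 12 / 3 == 4 (it would return 1 had no payloads been seen)
    float docScore = avg.DocScore(0, "body", payloadScores.Length, running);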
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/BoostingTermQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/BoostingTermQuery.cs
new file mode 100644 (file)
index 0000000..bd4950e
--- /dev/null
@@ -0,0 +1,105 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using Scorer = Mono.Lucene.Net.Search.Scorer;
+using Searcher = Mono.Lucene.Net.Search.Searcher;
+using Weight = Mono.Lucene.Net.Search.Weight;
+using TermSpans = Mono.Lucene.Net.Search.Spans.TermSpans;
+
+namespace Mono.Lucene.Net.Search.Payloads
+{
+       
+       /// <summary> The BoostingTermQuery is very similar to the {@link Mono.Lucene.Net.Search.Spans.SpanTermQuery} except
+       /// that it factors in the value of the payload located at each of the positions where the
+       /// {@link Mono.Lucene.Net.Index.Term} occurs.
+       /// <p/>
+       /// In order to take advantage of this, you must override {@link Mono.Lucene.Net.Search.Similarity#ScorePayload(String, byte[],int,int)}
+       /// which returns 1 by default.
+       /// <p/>
+       /// Payload scores are averaged across term occurrences in the document.  
+       /// 
+       /// </summary>
+       /// <seealso cref="Mono.Lucene.Net.Search.Similarity.ScorePayload(String, byte[], int, int)">
+       /// 
+       /// </seealso>
+       /// <deprecated> See {@link Mono.Lucene.Net.Search.Payloads.PayloadTermQuery}
+       /// </deprecated>
+    [Obsolete("See Mono.Lucene.Net.Search.Payloads.PayloadTermQuery")]
+       [Serializable]
+       public class BoostingTermQuery:PayloadTermQuery
+       {
+               
+               public BoostingTermQuery(Term term):this(term, true)
+               {
+               }
+               
+               public BoostingTermQuery(Term term, bool includeSpanScore):base(term, new AveragePayloadFunction(), includeSpanScore)
+               {
+               }
+               
+               public override Weight CreateWeight(Searcher searcher)
+               {
+                       return new BoostingTermWeight(this, this, searcher);
+               }
+               
+               [Serializable]
+               protected internal class BoostingTermWeight:PayloadTermWeight
+               {
+                       private void  InitBlock(BoostingTermQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private BoostingTermQuery enclosingInstance;
+                       public new BoostingTermQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       
+                       public BoostingTermWeight(BoostingTermQuery enclosingInstance, BoostingTermQuery query, Searcher searcher):base(enclosingInstance, query, searcher)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       
+                       public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
+                       {
+                               return new PayloadTermSpanScorer(this, (TermSpans) query.GetSpans(reader), this, similarity, reader.Norms(query.GetField()));
+                       }
+               }
+               
+               
+               public  override bool Equals(System.Object o)
+               {
+                       if (!(o is BoostingTermQuery))
+                               return false;
+                       BoostingTermQuery other = (BoostingTermQuery) o;
+                       return (this.GetBoost() == other.GetBoost()) && this.term.Equals(other.term);
+               }
+               
+               public override int GetHashCode()
+               {
+                       return base.GetHashCode();
+               }
+       }
+}
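
Because both constructors above simply delegate, migrating off the deprecated class is mechanical. A sketch, with "body"/"lucene" as an invented term:

    var term = new Mono.Lucene.Net.Index.Term("body", "lucene");

    // Deprecated spelling:
    var old = new Mono.Lucene.Net.Search.Payloads.BoostingTermQuery(term);

    // Equivalent replacement, per the constructor chain above:
    var replacement = new Mono.Lucene.Net.Search.Payloads.PayloadTermQuery(
        term, new Mono.Lucene.Net.Search.Payloads.AveragePayloadFunction(), true);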
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/MaxPayloadFunction.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/MaxPayloadFunction.cs
new file mode 100644 (file)
index 0000000..b816a9e
--- /dev/null
@@ -0,0 +1,69 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search.Payloads
+{
+       
+       
+       /// <summary> Returns the maximum payload score seen, else 1 if there are no payloads on the doc.
+       /// <p/>
+       /// This class is thread safe and completely reusable.
+       /// </summary>
+       [Serializable]
+       public class MaxPayloadFunction:PayloadFunction
+       {
+               public override float CurrentScore(int docId, System.String field, int start, int end, int numPayloadsSeen, float currentScore, float currentPayloadScore)
+               {
+                       if (numPayloadsSeen == 0)
+                       {
+                               return currentPayloadScore;
+                       }
+                       else
+                       {
+                               return System.Math.Max(currentPayloadScore, currentScore);
+                       }
+               }
+               
+               public override float DocScore(int docId, System.String field, int numPayloadsSeen, float payloadScore)
+               {
+                       return numPayloadsSeen > 0?payloadScore:1;
+               }
+               
+               public override int GetHashCode()
+               {
+                       int prime = 31;
+                       int result = 1;
+                       result = prime * result + this.GetType().GetHashCode();
+                       return result;
+               }
+               
+               public  override bool Equals(System.Object obj)
+               {
+                       if (this == obj)
+                               return true;
+                       if (obj == null)
+                               return false;
+                       if (GetType() != obj.GetType())
+                               return false;
+                       return true;
+               }
+       }
+}
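
A short invented trace of the recurrence above: the first call (numPayloadsSeen == 0) seeds the running maximum with the payload score, and each later call folds the next payload in:

    var max = new Mono.Lucene.Net.Search.Payloads.MaxPayloadFunction();
    float running = 0.0f;
    running = max.CurrentScore(0, "body", 0, 1, 0, running, 3.0f); // no payloads yet: 3
    running = max.CurrentScore(0, "body", 0, 1, 1, running, 7.0f); // Max(7, 3) == 7
    running = max.CurrentScore(0, "body", 0, 1, 2, running, 5.0f); // Max(5, 7) == 7
    float docScore = max.DocScore(0, "body", 3, running);          // 7 (1 if none seen)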
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/MinPayloadFunction.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/MinPayloadFunction.cs
new file mode 100644 (file)
index 0000000..2a41463
--- /dev/null
@@ -0,0 +1,67 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search.Payloads
+{
+       
+       /// <summary> Calculates the minimum payload score seen, else 1 if there are no payloads on the doc.
+       /// <p/>
+       /// This class is thread safe and completely reusable.
+       /// </summary>
+       [Serializable]
+       public class MinPayloadFunction:PayloadFunction
+       {
+               
+               public override float CurrentScore(int docId, System.String field, int start, int end, int numPayloadsSeen, float currentScore, float currentPayloadScore)
+               {
+                       if (numPayloadsSeen == 0)
+                       {
+                               return currentPayloadScore;
+                       }
+                       else
+                       {
+                               return System.Math.Min(currentPayloadScore, currentScore);
+                       }
+               }
+               
+               public override float DocScore(int docId, System.String field, int numPayloadsSeen, float payloadScore)
+               {
+                       return numPayloadsSeen > 0?payloadScore:1;
+               }
+               
+               public override int GetHashCode()
+               {
+                       int prime = 31;
+                       int result = 1;
+                       result = prime * result + this.GetType().GetHashCode();
+                       return result;
+               }
+               
+               public  override bool Equals(System.Object obj)
+               {
+                       if (this == obj)
+                               return true;
+                       if (obj == null)
+                               return false;
+                       if (GetType() != obj.GetType())
+                               return false;
+                       return true;
+               }
+       }
+}
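
Min, Max and Average all fill the same PayloadFunction slot, so choosing between them is a one-argument change on the query side. A sketch with an invented term, using the PayloadTermQuery added later in this commit:

    var term = new Mono.Lucene.Net.Index.Term("body", "lucene");

    // Score each matching doc by its smallest payload rather than the average:
    var minQuery = new Mono.Lucene.Net.Search.Payloads.PayloadTermQuery(
        term, new Mono.Lucene.Net.Search.Payloads.MinPayloadFunction());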
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/Package.html b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/Package.html
new file mode 100644 (file)
index 0000000..c20b497
--- /dev/null
@@ -0,0 +1,37 @@
+<HTML>\r
+<!--\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements.  See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License.  You may obtain a copy of the License at\r
+\r
+     http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+-->\r
+<HEAD>\r
+    <TITLE>org.apache.lucene.search.payloads</TITLE>\r
+</HEAD>\r
+<BODY>\r
+<DIV>The payloads package provides Query mechanisms for finding and using payloads.\r
+\r
+  The following Query implementations are provided:\r
+</DIV>\r
+<div>\r
+  <ol>\r
+    <li><a href = "PayloadTermQuery.html">PayloadTermQuery</a> -- Boost a term's score based on the value of the payload located at that term.</li>\r
+       <li><a href = "PayloadNearQuery.html">PayloadNearQuery</a> -- A <a href = "spans/SpanNearQuery.html">SpanNearQuery</a> that factors in the value of the payloads located \r
+       at each of the positions where the spans occur.</li>\r
+  </ol>\r
+</div>\r
+<DIV>&nbsp;</DIV>\r
+<DIV align="center">\r
+</DIV>\r
+</BODY>\r
+</HTML>\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/PayloadFunction.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/PayloadFunction.cs
new file mode 100644 (file)
index 0000000..b33d397
--- /dev/null
@@ -0,0 +1,78 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search.Payloads
+{
+       
+       
+       /// <summary> An abstract class that defines a way for Payload*Query instances
+       /// to transform the cumulative effects of payload scores for a document.
+       /// <p/>
+       /// This class and its derivations are experimental and subject to change.
+       /// </summary>
+       /// <seealso cref="Mono.Lucene.Net.Search.Payloads.PayloadTermQuery">for more information
+       /// </seealso>
+       [Serializable]
+       public abstract class PayloadFunction
+       {
+               
+               /// <summary> Calculate the score up to this point for this doc and field</summary>
+               /// <param name="docId">The current doc
+               /// </param>
+               /// <param name="field">The field
+               /// </param>
+               /// <param name="start">The start position of the matching Span
+               /// </param>
+               /// <param name="end">The end position of the matching Span
+               /// </param>
+               /// <param name="numPayloadsSeen">The number of payloads seen so far
+               /// </param>
+               /// <param name="currentScore">The current score so far
+               /// </param>
+               /// <param name="currentPayloadScore">The score for the current payload
+               /// </param>
+               /// <returns> The new current Score
+               /// 
+               /// </returns>
+               /// <seealso cref="Mono.Lucene.Net.Search.Spans.Spans">
+               /// </seealso>
+               public abstract float CurrentScore(int docId, System.String field, int start, int end, int numPayloadsSeen, float currentScore, float currentPayloadScore);
+               
+               /// <summary> Calculate the final score for all the payloads seen so far for this doc/field</summary>
+               /// <param name="docId">The current doc
+               /// </param>
+               /// <param name="field">The current field
+               /// </param>
+               /// <param name="numPayloadsSeen">The total number of payloads seen on this document
+               /// </param>
+               /// <param name="payloadScore">The raw score for those payloads
+               /// </param>
+               /// <returns> The final score for the payloads
+               /// </returns>
+               public abstract float DocScore(int docId, System.String field, int numPayloadsSeen, float payloadScore);
+               
+               abstract public override int GetHashCode();
+               
+               abstract public  override bool Equals(System.Object o);
+       }
+}
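
Because Equals and GetHashCode are redeclared abstract above, every concrete function must define its own value equality, as the built-in implementations in this commit do. A minimal sketch of a custom policy, a hypothetical sum-of-payloads function that is not part of this port:

    [System.Serializable]
    public class SumPayloadFunction : Mono.Lucene.Net.Search.Payloads.PayloadFunction
    {
        public override float CurrentScore(int docId, System.String field, int start, int end,
                                           int numPayloadsSeen, float currentScore, float currentPayloadScore)
        {
            return currentScore + currentPayloadScore; // keep a running sum
        }

        public override float DocScore(int docId, System.String field, int numPayloadsSeen, float payloadScore)
        {
            return payloadScore; // no averaging and no fallback to 1
        }

        public override int GetHashCode()
        {
            return GetType().GetHashCode(); // stateless, so the type is the identity
        }

        public override bool Equals(System.Object o)
        {
            return o != null && GetType() == o.GetType();
        }
    }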
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/PayloadNearQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/PayloadNearQuery.cs
new file mode 100644 (file)
index 0000000..56a4ee8
--- /dev/null
@@ -0,0 +1,290 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+using Explanation = Mono.Lucene.Net.Search.Explanation;
+using Scorer = Mono.Lucene.Net.Search.Scorer;
+using Searcher = Mono.Lucene.Net.Search.Searcher;
+using Similarity = Mono.Lucene.Net.Search.Similarity;
+using Weight = Mono.Lucene.Net.Search.Weight;
+using NearSpansOrdered = Mono.Lucene.Net.Search.Spans.NearSpansOrdered;
+using NearSpansUnordered = Mono.Lucene.Net.Search.Spans.NearSpansUnordered;
+using SpanNearQuery = Mono.Lucene.Net.Search.Spans.SpanNearQuery;
+using SpanQuery = Mono.Lucene.Net.Search.Spans.SpanQuery;
+using SpanScorer = Mono.Lucene.Net.Search.Spans.SpanScorer;
+using SpanWeight = Mono.Lucene.Net.Search.Spans.SpanWeight;
+
+namespace Mono.Lucene.Net.Search.Payloads
+{
+       
+       /// <summary> This class is very similar to
+       /// {@link Mono.Lucene.Net.Search.Spans.SpanNearQuery} except that it factors
+       /// in the value of the payloads located at each of the positions where the
+       /// {@link Mono.Lucene.Net.Search.Spans.TermSpans} occurs.
+       /// <p/>
+       /// In order to take advantage of this, you must override
+       /// {@link Mono.Lucene.Net.Search.Similarity#ScorePayload(String, byte[],int,int)}
+       /// which returns 1 by default.
+       /// <p/>
+       /// Payload scores are aggregated using a pluggable {@link PayloadFunction}.
+       /// 
+       /// </summary>
+       /// <seealso cref="Mono.Lucene.Net.Search.Similarity.ScorePayload(String, byte[], int,int)">
+       /// </seealso>
+       [Serializable]
+       public class PayloadNearQuery:SpanNearQuery, System.ICloneable
+       {
+               protected internal System.String fieldName;
+               protected internal PayloadFunction function;
+               
+               public PayloadNearQuery(SpanQuery[] clauses, int slop, bool inOrder):this(clauses, slop, inOrder, new AveragePayloadFunction())
+               {
+               }
+               
+               public PayloadNearQuery(SpanQuery[] clauses, int slop, bool inOrder, PayloadFunction function):base(clauses, slop, inOrder)
+               {
+                       fieldName = clauses[0].GetField(); // all clauses must have same field
+                       this.function = function;
+               }
+               
+               public override Weight CreateWeight(Searcher searcher)
+               {
+                       return new PayloadNearSpanWeight(this, this, searcher);
+               }
+               
+               public override System.Object Clone()
+               {
+                       int sz = clauses.Count;
+                       SpanQuery[] newClauses = new SpanQuery[sz];
+                       
+                       for (int i = 0; i < sz; i++)
+                       {
+                               SpanQuery clause = (SpanQuery) clauses[i];
+                               newClauses[i] = (SpanQuery) clause.Clone();
+                       }
+                       PayloadNearQuery boostingNearQuery = new PayloadNearQuery(newClauses, slop, inOrder, function); // preserve the payload function when cloning
+                       boostingNearQuery.SetBoost(GetBoost());
+                       return boostingNearQuery;
+               }
+               
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       buffer.Append("payloadNear([");
+                       System.Collections.IEnumerator i = clauses.GetEnumerator();
+                       bool first = true;
+                       while (i.MoveNext())
+                       {
+                               SpanQuery clause = (SpanQuery) i.Current;
+                               if (!first)
+                               {
+                                       // separate clauses without advancing the enumerator a second time
+                                       buffer.Append(", ");
+                               }
+                               first = false;
+                               buffer.Append(clause.ToString(field));
+                       }
+                       buffer.Append("], ");
+                       buffer.Append(slop);
+                       buffer.Append(", ");
+                       buffer.Append(inOrder);
+                       buffer.Append(")");
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       return buffer.ToString();
+               }
+               
+               public override int GetHashCode()
+               {
+                       int prime = 31;
+                       int result = base.GetHashCode();
+                       result = prime * result + ((fieldName == null)?0:fieldName.GetHashCode());
+                       result = prime * result + ((function == null)?0:function.GetHashCode());
+                       return result;
+               }
+               
+               public  override bool Equals(System.Object obj)
+               {
+                       if (this == obj)
+                               return true;
+                       if (!base.Equals(obj))
+                               return false;
+                       if (GetType() != obj.GetType())
+                               return false;
+                       PayloadNearQuery other = (PayloadNearQuery) obj;
+                       if (fieldName == null)
+                       {
+                               if (other.fieldName != null)
+                                       return false;
+                       }
+                       else if (!fieldName.Equals(other.fieldName))
+                               return false;
+                       if (function == null)
+                       {
+                               if (other.function != null)
+                                       return false;
+                       }
+                       else if (!function.Equals(other.function))
+                               return false;
+                       return true;
+               }
+               
+               [Serializable]
+               public class PayloadNearSpanWeight:SpanWeight
+               {
+                       private void  InitBlock(PayloadNearQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private PayloadNearQuery enclosingInstance;
+                       public PayloadNearQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       public PayloadNearSpanWeight(PayloadNearQuery enclosingInstance, SpanQuery query, Searcher searcher):base(query, searcher)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       
+                       public virtual Scorer Scorer(IndexReader reader)
+                       {
+                               return new PayloadNearSpanScorer(enclosingInstance, query.GetSpans(reader), this, similarity, reader.Norms(query.GetField()));
+                       }
+                       
+                       public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
+                       {
+                               return new PayloadNearSpanScorer(enclosingInstance, query.GetSpans(reader), this, similarity, reader.Norms(query.GetField()));
+                       }
+               }
+               
+               public class PayloadNearSpanScorer:SpanScorer
+               {
+                       private void  InitBlock(PayloadNearQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                               similarity = GetSimilarity();
+                       }
+                       private PayloadNearQuery enclosingInstance;
+                       public PayloadNearQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       new internal Mono.Lucene.Net.Search.Spans.Spans spans;
+                       
+                       protected internal float payloadScore;
+                       private int payloadsSeen;
+                       internal Similarity similarity;
+                       
+                       protected internal PayloadNearSpanScorer(PayloadNearQuery enclosingInstance, Mono.Lucene.Net.Search.Spans.Spans spans, Weight weight, Similarity similarity, byte[] norms):base(spans, weight, similarity, norms)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.spans = spans;
+                       }
+                       
+                       // Get the payloads associated with all underlying subspans
+                       public virtual void  GetPayloads(Mono.Lucene.Net.Search.Spans.Spans[] subSpans)
+                       {
+                               for (int i = 0; i < subSpans.Length; i++)
+                               {
+                                       if (subSpans[i] is NearSpansOrdered)
+                                       {
+                                               if (((NearSpansOrdered) subSpans[i]).IsPayloadAvailable())
+                                               {
+                                                       ProcessPayloads(((NearSpansOrdered) subSpans[i]).GetPayload(), subSpans[i].Start(), subSpans[i].End());
+                                               }
+                                               GetPayloads(((NearSpansOrdered) subSpans[i]).GetSubSpans());
+                                       }
+                                       else if (subSpans[i] is NearSpansUnordered)
+                                       {
+                                               if (((NearSpansUnordered) subSpans[i]).IsPayloadAvailable())
+                                               {
+                                                       ProcessPayloads(((NearSpansUnordered) subSpans[i]).GetPayload(), subSpans[i].Start(), subSpans[i].End());
+                                               }
+                                               GetPayloads(((NearSpansUnordered) subSpans[i]).GetSubSpans());
+                                       }
+                               }
+                       }
+                       
+                       /// <summary> By default, uses the {@link PayloadFunction} to score the payloads, but
+                       /// can be overridden to do other things.
+                       /// 
+                       /// </summary>
+                       /// <param name="payLoads">The payloads
+                       /// </param>
+                       /// <param name="start">The start position of the span being scored
+                       /// </param>
+                       /// <param name="end">The end position of the span being scored
+                       /// 
+                       /// </param>
+                       /// <seealso cref="Spans">
+                       /// </seealso>
+                       protected internal virtual void  ProcessPayloads(System.Collections.Generic.ICollection<byte[]> payLoads, int start, int end)
+                       {
+                               foreach (byte[] thePayload in payLoads)
+                               {
+                                       payloadScore = Enclosing_Instance.function.CurrentScore(doc, Enclosing_Instance.fieldName, start, end, payloadsSeen, payloadScore, similarity.ScorePayload(doc, Enclosing_Instance.fieldName, spans.Start(), spans.End(), thePayload, 0, thePayload.Length));
+                                       ++payloadsSeen;
+                               }
+                       }
+                       
+                       public /*protected internal*/ override bool SetFreqCurrentDoc()
+                       {
+                               if (!more)
+                               {
+                                       return false;
+                               }
+                               Mono.Lucene.Net.Search.Spans.Spans[] spansArr = new Mono.Lucene.Net.Search.Spans.Spans[1];
+                               spansArr[0] = spans;
+                               payloadScore = 0;
+                               payloadsSeen = 0;
+                               GetPayloads(spansArr);
+                               return base.SetFreqCurrentDoc();
+                       }
+                       
+                       public override float Score()
+                       {
+                               
+                               return base.Score() * Enclosing_Instance.function.DocScore(doc, Enclosing_Instance.fieldName, payloadsSeen, payloadScore);
+                       }
+                       
+                       public override Explanation Explain(int doc)
+                       {
+                               Explanation result = new Explanation();
+                               Explanation nonPayloadExpl = base.Explain(doc);
+                               result.AddDetail(nonPayloadExpl);
+                               Explanation payloadBoost = new Explanation();
+                               result.AddDetail(payloadBoost);
+                               float avgPayloadScore = (payloadsSeen > 0?(payloadScore / payloadsSeen):1);
+                               payloadBoost.SetValue(avgPayloadScore);
+                               payloadBoost.SetDescription("scorePayload(...)");
+                               result.SetValue(nonPayloadExpl.GetValue() * avgPayloadScore);
+                               result.SetDescription("bnq, product of:");
+                               return result;
+                       }
+               }
+       }
+}
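
Tying this file together: a hedged construction sketch over an invented field, using the four-argument constructor to swap MaxPayloadFunction in for the default average:

    var clauses = new Mono.Lucene.Net.Search.Spans.SpanQuery[]
    {
        new Mono.Lucene.Net.Search.Spans.SpanTermQuery(new Mono.Lucene.Net.Index.Term("body", "open")),
        new Mono.Lucene.Net.Search.Spans.SpanTermQuery(new Mono.Lucene.Net.Index.Term("body", "source"))
    };
    // slop 0, inOrder true: the terms must be adjacent and ordered; payloads found at
    // the matching positions are combined by the supplied function.
    var q = new Mono.Lucene.Net.Search.Payloads.PayloadNearQuery(
        clauses, 0, true, new Mono.Lucene.Net.Search.Payloads.MaxPayloadFunction());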
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/PayloadSpanUtil.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/PayloadSpanUtil.cs
new file mode 100644 (file)
index 0000000..9f66fe3
--- /dev/null
@@ -0,0 +1,213 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using System.Collections.Generic;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using BooleanClause = Mono.Lucene.Net.Search.BooleanClause;
+using BooleanQuery = Mono.Lucene.Net.Search.BooleanQuery;
+using DisjunctionMaxQuery = Mono.Lucene.Net.Search.DisjunctionMaxQuery;
+using FilteredQuery = Mono.Lucene.Net.Search.FilteredQuery;
+using MultiPhraseQuery = Mono.Lucene.Net.Search.MultiPhraseQuery;
+using PhraseQuery = Mono.Lucene.Net.Search.PhraseQuery;
+using Query = Mono.Lucene.Net.Search.Query;
+using TermQuery = Mono.Lucene.Net.Search.TermQuery;
+using SpanNearQuery = Mono.Lucene.Net.Search.Spans.SpanNearQuery;
+using SpanOrQuery = Mono.Lucene.Net.Search.Spans.SpanOrQuery;
+using SpanQuery = Mono.Lucene.Net.Search.Spans.SpanQuery;
+using SpanTermQuery = Mono.Lucene.Net.Search.Spans.SpanTermQuery;
+
+namespace Mono.Lucene.Net.Search.Payloads
+{
+       
+       /// <summary> Experimental class to get the set of payloads for most standard Lucene queries.
+       /// Operates like the Highlighter: the IndexReader should contain only the doc of interest,
+       /// so it is best to use a MemoryIndex.
+       /// <p/>
+       /// <font color="#FF0000">
+       /// WARNING: The status of the <b>Payloads</b> feature is experimental.
+       /// The APIs introduced here might change in the future and will not be
+       /// supported anymore in such a case.</font>
+       /// </summary>
+       public class PayloadSpanUtil
+       {
+               private IndexReader reader;
+               
+               /// <param name="reader">the IndexReader that contains the doc with payloads to extract
+               /// </param>
+               public PayloadSpanUtil(IndexReader reader)
+               {
+                       this.reader = reader;
+               }
+               
+               /// <summary> The query should be rewritten first for wildcard/fuzzy support.
+               /// </summary>
+               /// <param name="query">the query to extract payloads for
+               /// </param>
+               /// <returns> a Collection of payloads
+               /// </returns>
+               /// <throws>  IOException </throws>
+               public virtual ICollection<byte[]> GetPayloadsForQuery(Query query)
+               {
+                       ICollection<byte[]> payloads = new List<byte[]>();
+                       QueryToSpanQuery(query, payloads);
+                       return payloads;
+               }
+               
+               private void  QueryToSpanQuery(Query query, ICollection<byte[]> payloads)
+               {
+                       if (query is BooleanQuery)
+                       {
+                               BooleanClause[] queryClauses = ((BooleanQuery) query).GetClauses();
+                               
+                               for (int i = 0; i < queryClauses.Length; i++)
+                               {
+                                       if (!queryClauses[i].IsProhibited())
+                                       {
+                                               QueryToSpanQuery(queryClauses[i].GetQuery(), payloads);
+                                       }
+                               }
+                       }
+                       else if (query is PhraseQuery)
+                       {
+                               Term[] phraseQueryTerms = ((PhraseQuery) query).GetTerms();
+                               SpanQuery[] clauses = new SpanQuery[phraseQueryTerms.Length];
+                               for (int i = 0; i < phraseQueryTerms.Length; i++)
+                               {
+                                       clauses[i] = new SpanTermQuery(phraseQueryTerms[i]);
+                               }
+                               
+                               int slop = ((PhraseQuery) query).GetSlop();
+                               bool inorder = false;
+                               
+                               if (slop == 0)
+                               {
+                                       inorder = true;
+                               }
+                               
+                               SpanNearQuery sp = new SpanNearQuery(clauses, slop, inorder);
+                               sp.SetBoost(query.GetBoost());
+                               GetPayloads(payloads, sp);
+                       }
+                       else if (query is TermQuery)
+                       {
+                               SpanTermQuery stq = new SpanTermQuery(((TermQuery) query).GetTerm());
+                               stq.SetBoost(query.GetBoost());
+                               GetPayloads(payloads, stq);
+                       }
+                       else if (query is SpanQuery)
+                       {
+                               GetPayloads(payloads, (SpanQuery) query);
+                       }
+                       else if (query is FilteredQuery)
+                       {
+                               QueryToSpanQuery(((FilteredQuery) query).GetQuery(), payloads);
+                       }
+                       else if (query is DisjunctionMaxQuery)
+                       {
+                               
+                               for (System.Collections.IEnumerator iterator = ((DisjunctionMaxQuery) query).Iterator(); iterator.MoveNext(); )
+                               {
+                                       QueryToSpanQuery((Query) iterator.Current, payloads);
+                               }
+                       }
+                       else if (query is MultiPhraseQuery)
+                       {
+                               MultiPhraseQuery mpq = (MultiPhraseQuery) query;
+                               System.Collections.IList termArrays = mpq.GetTermArrays();
+                               int[] positions = mpq.GetPositions();
+                               if (positions.Length > 0)
+                               {
+                                       
+                                       int maxPosition = positions[positions.Length - 1];
+                                       for (int i = 0; i < positions.Length - 1; ++i)
+                                       {
+                                               if (positions[i] > maxPosition)
+                                               {
+                                                       maxPosition = positions[i];
+                                               }
+                                       }
+                                       
+                                       System.Collections.ArrayList[] disjunctLists = new System.Collections.ArrayList[maxPosition + 1];
+                                       int distinctPositions = 0;
+                                       
+                                       for (int i = 0; i < termArrays.Count; ++i)
+                                       {
+                                               Term[] termArray = (Term[]) termArrays[i];
+                                               System.Collections.IList disjuncts = disjunctLists[positions[i]];
+                                               if (disjuncts == null)
+                                               {
+                                                       disjuncts = (disjunctLists[positions[i]] = new System.Collections.ArrayList(termArray.Length));
+                                                       ++distinctPositions;
+                                               }
+                                               for (int j = 0; j < termArray.Length; ++j)
+                                               {
+                                                       disjuncts.Add(new SpanTermQuery(termArray[j]));
+                                               }
+                                       }
+                                       
+                                       int positionGaps = 0;
+                                       int position = 0;
+                                       SpanQuery[] clauses = new SpanQuery[distinctPositions];
+                                       for (int i = 0; i < disjunctLists.Length; ++i)
+                                       {
+                                               System.Collections.ArrayList disjuncts = disjunctLists[i];
+                                               if (disjuncts != null)
+                                               {
+                                                       clauses[position++] = new SpanOrQuery((SpanQuery[]) (disjuncts.ToArray(typeof(SpanQuery[]))));
+                                               }
+                                               else
+                                               {
+                                                       ++positionGaps;
+                                               }
+                                       }
+                                       
+                                       int slop = mpq.GetSlop();
+                                       bool inorder = (slop == 0);
+                                       
+                                       SpanNearQuery sp = new SpanNearQuery(clauses, slop + positionGaps, inorder);
+                                       sp.SetBoost(query.GetBoost());
+                                       GetPayloads(payloads, sp);
+                               }
+                       }
+               }
+               
+               private void  GetPayloads(ICollection<byte[]> payloads, SpanQuery query)
+               {
+                       Mono.Lucene.Net.Search.Spans.Spans spans = query.GetSpans(reader);
+                       
+                       while (spans.Next())
+                       {
+                               if (spans.IsPayloadAvailable())
+                               {
+                                       System.Collections.Generic.ICollection<byte[]> payload = spans.GetPayload();
+                                       foreach (byte[] bytes in payload)
+                                       {
+                                               payloads.Add(bytes);
+                                       }
+                               }
+                       }
+               }
+       }
+}
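
A hedged usage sketch; the reader is assumed to be opened elsewhere over just the document of interest, per the class remarks, and "body"/"lucene" is an invented term:

    static System.Collections.Generic.ICollection<byte[]> PayloadsFor(
        Mono.Lucene.Net.Index.IndexReader reader) // single doc of interest, opened elsewhere
    {
        var util = new Mono.Lucene.Net.Search.Payloads.PayloadSpanUtil(reader);
        var query = new Mono.Lucene.Net.Search.TermQuery(
            new Mono.Lucene.Net.Index.Term("body", "lucene"));
        return util.GetPayloadsForQuery(query); // handles the query shapes dispatched above
    }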
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/PayloadTermQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Payloads/PayloadTermQuery.cs
new file mode 100644 (file)
index 0000000..172a21a
--- /dev/null
@@ -0,0 +1,253 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using TermPositions = Mono.Lucene.Net.Index.TermPositions;
+using ComplexExplanation = Mono.Lucene.Net.Search.ComplexExplanation;
+using Explanation = Mono.Lucene.Net.Search.Explanation;
+using Scorer = Mono.Lucene.Net.Search.Scorer;
+using Searcher = Mono.Lucene.Net.Search.Searcher;
+using Similarity = Mono.Lucene.Net.Search.Similarity;
+using Weight = Mono.Lucene.Net.Search.Weight;
+using SpanScorer = Mono.Lucene.Net.Search.Spans.SpanScorer;
+using SpanTermQuery = Mono.Lucene.Net.Search.Spans.SpanTermQuery;
+using SpanWeight = Mono.Lucene.Net.Search.Spans.SpanWeight;
+using TermSpans = Mono.Lucene.Net.Search.Spans.TermSpans;
+
+namespace Mono.Lucene.Net.Search.Payloads
+{
+       
+       /// <summary> This class is very similar to
+       /// {@link Mono.Lucene.Net.Search.Spans.SpanTermQuery} except that it factors
+       /// in the value of the payload located at each of the positions where the
+       /// {@link Mono.Lucene.Net.Index.Term} occurs.
+       /// <p/>
+       /// In order to take advantage of this, you must override
+       /// {@link Mono.Lucene.Net.Search.Similarity#ScorePayload(String, byte[],int,int)}
+       /// which returns 1 by default.
+       /// <p/>
+       /// Payload scores are aggregated using a pluggable {@link PayloadFunction}.
+       /// 
+       /// </summary>
+       [Serializable]
+       public class PayloadTermQuery:SpanTermQuery
+       {
+               protected internal PayloadFunction function;
+               private bool includeSpanScore;
+               
+               public PayloadTermQuery(Term term, PayloadFunction function):this(term, function, true)
+               {
+               }
+               
+               public PayloadTermQuery(Term term, PayloadFunction function, bool includeSpanScore):base(term)
+               {
+                       this.function = function;
+                       this.includeSpanScore = includeSpanScore;
+               }
+               
+               public override Weight CreateWeight(Searcher searcher)
+               {
+                       return new PayloadTermWeight(this, this, searcher);
+               }
+               
+               [Serializable]
+               protected internal class PayloadTermWeight:SpanWeight
+               {
+                       private void  InitBlock(PayloadTermQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private PayloadTermQuery enclosingInstance;
+                       public PayloadTermQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       
+                       public PayloadTermWeight(PayloadTermQuery enclosingInstance, PayloadTermQuery query, Searcher searcher):base(query, searcher)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       
+                       public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
+                       {
+                               return new PayloadTermSpanScorer(this, (TermSpans) query.GetSpans(reader), this, similarity, reader.Norms(query.GetField()));
+                       }
+                       
+                       protected internal class PayloadTermSpanScorer:SpanScorer
+                       {
+                               private void  InitBlock(PayloadTermWeight enclosingInstance)
+                               {
+                                       this.enclosingInstance = enclosingInstance;
+                               }
+                               private PayloadTermWeight enclosingInstance;
+                               public PayloadTermWeight Enclosing_Instance
+                               {
+                                       get
+                                       {
+                                               return enclosingInstance;
+                                       }
+                                       
+                               }
+                               // TODO: is this the best way to allocate this?
+                               protected internal byte[] payload = new byte[256];
+                               protected internal TermPositions positions;
+                               protected internal float payloadScore;
+                               protected internal int payloadsSeen;
+                               
+                               public PayloadTermSpanScorer(PayloadTermWeight enclosingInstance, TermSpans spans, Weight weight, Similarity similarity, byte[] norms):base(spans, weight, similarity, norms)
+                               {
+                                       InitBlock(enclosingInstance);
+                                       positions = spans.GetPositions();
+                               }
+                               
+                               public /*protected internal*/ override bool SetFreqCurrentDoc()
+                               {
+                                       if (!more)
+                                       {
+                                               return false;
+                                       }
+                                       doc = spans.Doc();
+                                       freq = 0.0f;
+                                       payloadScore = 0;
+                                       payloadsSeen = 0;
+                                       Similarity similarity1 = GetSimilarity();
+                                       while (more && doc == spans.Doc())
+                                       {
+                                               int matchLength = spans.End() - spans.Start();
+                                               
+                                               freq += similarity1.SloppyFreq(matchLength);
+                                               ProcessPayload(similarity1);
+                                               
+                                               more = spans.Next(); // this moves positions to the next match in this document
+                                       }
+                                       return more || (freq != 0);
+                               }
+                               
+                               protected internal virtual void  ProcessPayload(Similarity similarity)
+                               {
+                                       if (positions.IsPayloadAvailable())
+                                       {
+                                               payload = positions.GetPayload(payload, 0);
+                                               payloadScore = Enclosing_Instance.Enclosing_Instance.function.CurrentScore(doc, Enclosing_Instance.Enclosing_Instance.term.Field(), spans.Start(), spans.End(), payloadsSeen, payloadScore, similarity.ScorePayload(doc, Enclosing_Instance.Enclosing_Instance.term.Field(), spans.Start(), spans.End(), payload, 0, positions.GetPayloadLength()));
+                                               payloadsSeen++;
+                                       }
+                                       else
+                                       {
+                                               // zero out the payload?
+                                       }
+                               }
+                               
+                               /// <returns> {@link #GetSpanScore()} * {@link #GetPayloadScore()}
+                               /// </returns>
+                               /// <throws>  IOException </throws>
+                               public override float Score()
+                               {
+                                       
+                                       return Enclosing_Instance.Enclosing_Instance.includeSpanScore?GetSpanScore() * GetPayloadScore():GetPayloadScore();
+                               }
+                               
+                               /// <summary> Returns the SpanScorer score only.
+                               /// <p/>
+                               /// Should not be overridden without good cause!
+                               /// </summary>
+                               /// <returns> the score for just the Span part w/o the payload
+                               /// </returns>
+                               /// <throws>  IOException </throws>
+                               /// <seealso cref="Score()">
+                               /// </seealso>
+                               protected internal virtual float GetSpanScore()
+                               {
+                                       return base.Score();
+                               }
+                               
+                               /// <summary> The score for the payload
+                               /// 
+                               /// </summary>
+                               /// <returns> The score, as calculated by
+                               /// {@link PayloadFunction#DocScore(int, String, int, float)}
+                               /// </returns>
+                               protected internal virtual float GetPayloadScore()
+                               {
+                                       return Enclosing_Instance.Enclosing_Instance.function.DocScore(doc, Enclosing_Instance.Enclosing_Instance.term.Field(), payloadsSeen, payloadScore);
+                               }
+                               
+                               public override Explanation Explain(int doc)
+                               {
+                                       ComplexExplanation result = new ComplexExplanation();
+                                       Explanation nonPayloadExpl = base.Explain(doc);
+                                       result.AddDetail(nonPayloadExpl);
+                                       // QUESTION: Is there a way to avoid this skipTo call? We need to know
+                                       // whether to load the payload or not
+                                       Explanation payloadBoost = new Explanation();
+                                       result.AddDetail(payloadBoost);
+                                       
+                                       float payloadScore = GetPayloadScore();
+                                       payloadBoost.SetValue(payloadScore);
+                                       // GSI: I suppose we could toString the payload, but I don't think that
+                                       // would be a good idea
+                                       payloadBoost.SetDescription("scorePayload(...)");
+                                       result.SetValue(nonPayloadExpl.GetValue() * payloadScore);
+                                       result.SetDescription("btq, product of:");
+                                       result.SetMatch(nonPayloadExpl.GetValue() != 0); // LUCENE-1303
+                                       return result;
+                               }
+                       }
+               }
+               
+               public override int GetHashCode()
+               {
+                       int prime = 31;
+                       int result = base.GetHashCode();
+                       result = prime * result + ((function == null)?0:function.GetHashCode());
+                       result = prime * result + (includeSpanScore ? 1231 : 1237); // 1231/1237: Java's Boolean.hashCode() constants, kept by the port
+                       return result;
+               }
+               
+               public  override bool Equals(System.Object obj)
+               {
+                       if (this == obj)
+                               return true;
+                       if (!base.Equals(obj))
+                               return false;
+                       if (GetType() != obj.GetType())
+                               return false;
+                       PayloadTermQuery other = (PayloadTermQuery) obj;
+                       if (function == null)
+                       {
+                               if (other.function != null)
+                                       return false;
+                       }
+                       else if (!function.Equals(other.function))
+                               return false;
+                       if (includeSpanScore != other.includeSpanScore)
+                               return false;
+                       return true;
+               }
+       }
+}
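
The split between GetSpanScore() and GetPayloadScore() above keeps the two contributions independent until the final Score() call. A minimal sketch of that combination, with hypothetical placeholder values (the real sub-scores come from the base SpanScorer and from PayloadFunction.DocScore):

    // Sketch only: how PayloadTermSpanScorer.Score() combines its parts.
    float spanScore = 0.8f;     // placeholder for GetSpanScore()
    float payloadScore = 1.5f;  // placeholder for GetPayloadScore()
    bool includeSpanScore = true;
    float score = includeSpanScore
        ? spanScore * payloadScore // both factors contribute
        : payloadScore;            // payload-only scoring
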
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PhrasePositions.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PhrasePositions.cs
new file mode 100644 (file)
index 0000000..2cbe5ad
--- /dev/null
@@ -0,0 +1,93 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Mono.Lucene.Net.Index;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Position of a term in a document that takes into account the term offset within the phrase. </summary>
+       sealed class PhrasePositions
+       {
+               internal int doc; // current doc
+               internal int position; // position in doc
+               internal int count; // remaining pos in this doc
+               internal int offset; // position in phrase
+               internal TermPositions tp; // stream of positions
+               internal PhrasePositions next; // used to make lists
+               internal bool repeats; // there are other pp's for the same term (e.g. query="1st word 2nd word"~1) 
+               
+               internal PhrasePositions(TermPositions t, int o)
+               {
+                       tp = t;
+                       offset = o;
+               }
+               
+               internal bool Next()
+               {
+                       // increments to next doc
+                       if (!tp.Next())
+                       {
+                               tp.Close(); // close stream
+                               doc = System.Int32.MaxValue; // sentinel value
+                               return false;
+                       }
+                       doc = tp.Doc();
+                       position = 0;
+                       return true;
+               }
+               
+               internal bool SkipTo(int target)
+               {
+                       if (!tp.SkipTo(target))
+                       {
+                               tp.Close(); // close stream
+                               doc = System.Int32.MaxValue; // sentinel value
+                               return false;
+                       }
+                       doc = tp.Doc();
+                       position = 0;
+                       return true;
+               }
+               
+               
+               internal void  FirstPosition()
+               {
+                       count = tp.Freq(); // read first pos
+                       NextPosition();
+               }
+               
+               /// <summary> Go to next location of this term in the current document, and set 
+               /// <code>position</code> as <code>location - offset</code>, so that a 
+               /// matching exact phrase is easily identified when all PhrasePositions 
+               /// have exactly the same <code>position</code>.
+               /// </summary>
+               internal bool NextPosition()
+               {
+                       if (count-- > 0)
+                       {
+                               // read subsequent pos's
+                               position = tp.NextPosition() - offset;
+                               return true;
+                       }
+                       else
+                               return false;
+               }
+       }
+}
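
The position = location - offset normalization documented on NextPosition() is easiest to see on a worked example. A toy sketch, not part of the port, for the phrase "new york":

    // Offsets are positions within the phrase; locations are term positions
    // within the document. After normalization, an exact match shows up as
    // every PhrasePositions reporting the same position.
    int[] offsets   = { 0, 1 }; // "new" is term 0 of the phrase, "york" term 1
    int[] locations = { 5, 6 }; // the document reads "... new york ..." at 5, 6
    int p0 = locations[0] - offsets[0]; // 5
    int p1 = locations[1] - offsets[1]; // 5
    bool exactMatch = (p0 == p1);       // true: the phrase starts at position 5
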
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PhraseQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PhraseQuery.cs
new file mode 100644 (file)
index 0000000..0818f33
--- /dev/null
@@ -0,0 +1,368 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using TermPositions = Mono.Lucene.Net.Index.TermPositions;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+using IDFExplanation = Mono.Lucene.Net.Search.Explanation.IDFExplanation;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>A Query that matches documents containing a particular sequence of terms.
+       /// A PhraseQuery is built by QueryParser for input like <code>"new york"</code>.
+       /// 
+       /// <p/>This query may be combined with other terms or queries with a {@link BooleanQuery}.
+       /// </summary>
+       [Serializable]
+       public class PhraseQuery:Query
+       {
+               private System.String field;
+        private SupportClass.EquatableList<Term> terms = new SupportClass.EquatableList<Term>(4);
+        private SupportClass.EquatableList<int> positions = new SupportClass.EquatableList<int>(4);
+               private int maxPosition = 0;
+               private int slop = 0;
+               
+               /// <summary>Constructs an empty phrase query. </summary>
+               public PhraseQuery()
+               {
+               }
+               
+               /// <summary>Sets the number of other words permitted between words in query phrase.
+               /// If zero, then this is an exact phrase search.  For larger values this works
+               /// like a <code>WITHIN</code> or <code>NEAR</code> operator.
+               /// <p/>The slop is in fact an edit-distance, where the units correspond to
+               /// moves of terms in the query phrase out of position.  For example, to switch
+               /// the order of two words requires two moves (the first move places the words
+               /// atop one another), so to permit re-orderings of phrases, the slop must be
+               /// at least two.
+               /// <p/>More exact matches are scored higher than sloppier matches, thus search
+               /// results are sorted by exactness.
+               /// <p/>The slop is zero by default, requiring exact matches.
+               /// </summary>
+               public virtual void  SetSlop(int s)
+               {
+                       slop = s;
+               }
+               /// <summary>Returns the slop.  See setSlop(). </summary>
+               public virtual int GetSlop()
+               {
+                       return slop;
+               }
+               
+               /// <summary> Adds a term to the end of the query phrase.
+               /// The relative position of the term is the one immediately after the last term added.
+               /// </summary>
+               public virtual void  Add(Term term)
+               {
+                       int position = 0;
+                       if (positions.Count > 0)
+                               position = ((System.Int32) positions[positions.Count - 1]) + 1;
+                       
+                       Add(term, position);
+               }
+               
+               /// <summary> Adds a term to the end of the query phrase.
+               /// The relative position of the term within the phrase is specified explicitly.
+               /// This allows e.g. phrases with more than one term at the same position
+               /// or phrases with gaps (e.g. in connection with stopwords).
+               /// 
+               /// </summary>
+               /// <param name="term">
+               /// </param>
+               /// <param name="position">
+               /// </param>
+               public virtual void  Add(Term term, int position)
+               {
+                       if (terms.Count == 0)
+                               field = term.Field();
+                       else if ((System.Object) term.Field() != (System.Object) field)
+                       {
+                               throw new System.ArgumentException("All phrase terms must be in the same field: " + term);
+                       }
+                       
+                       terms.Add(term);
+                       positions.Add((System.Int32) position);
+                       if (position > maxPosition)
+                               maxPosition = position;
+               }
+               
+               /// <summary>Returns the set of terms in this phrase. </summary>
+               public virtual Term[] GetTerms()
+               {
+                       return (Term[])terms.ToArray();
+               }
+               
+               /// <summary> Returns the relative positions of terms in this phrase.</summary>
+               public virtual int[] GetPositions()
+               {
+                       int[] result = new int[positions.Count];
+                       for (int i = 0; i < positions.Count; i++)
+                               result[i] = ((System.Int32) positions[i]);
+                       return result;
+               }
+               
+               [Serializable]
+               private class PhraseWeight:Weight
+               {
+                       private void  InitBlock(PhraseQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private PhraseQuery enclosingInstance;
+                       public PhraseQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private Similarity similarity;
+                       private float value_Renamed;
+                       private float idf;
+                       private float queryNorm;
+                       private float queryWeight;
+                       private IDFExplanation idfExp;
+                       
+                       public PhraseWeight(PhraseQuery enclosingInstance, Searcher searcher)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.similarity = Enclosing_Instance.GetSimilarity(searcher);
+                               
+                               idfExp = similarity.idfExplain(Enclosing_Instance.terms, searcher);
+                               idf = idfExp.GetIdf();
+                       }
+                       
+                       public override System.String ToString()
+                       {
+                               return "weight(" + Enclosing_Instance + ")";
+                       }
+                       
+                       public override Query GetQuery()
+                       {
+                               return Enclosing_Instance;
+                       }
+                       public override float GetValue()
+                       {
+                               return value_Renamed;
+                       }
+                       
+                       public override float SumOfSquaredWeights()
+                       {
+                               queryWeight = idf * Enclosing_Instance.GetBoost(); // compute query weight
+                               return queryWeight * queryWeight; // square it
+                       }
+                       
+                       public override void  Normalize(float queryNorm)
+                       {
+                               this.queryNorm = queryNorm;
+                               queryWeight *= queryNorm; // normalize query weight
+                               value_Renamed = queryWeight * idf; // idf for document 
+                       }
+                       
+                       public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
+                       {
+                               if (Enclosing_Instance.terms.Count == 0) // optimize zero-term case
+                                       return null;
+                               
+                               TermPositions[] tps = new TermPositions[Enclosing_Instance.terms.Count];
+                               for (int i = 0; i < Enclosing_Instance.terms.Count; i++)
+                               {
+                                       TermPositions p = reader.TermPositions((Term) Enclosing_Instance.terms[i]);
+                                       if (p == null)
+                                               return null;
+                                       tps[i] = p;
+                               }
+                               
+                               if (Enclosing_Instance.slop == 0) // optimize exact case
+                                       return new ExactPhraseScorer(this, tps, Enclosing_Instance.GetPositions(), similarity, reader.Norms(Enclosing_Instance.field));
+                               else
+                                       return new SloppyPhraseScorer(this, tps, Enclosing_Instance.GetPositions(), similarity, Enclosing_Instance.slop, reader.Norms(Enclosing_Instance.field));
+                       }
+                       
+                       public override Explanation Explain(IndexReader reader, int doc)
+                       {
+                               
+                               Explanation result = new Explanation();
+                               result.SetDescription("weight(" + GetQuery() + " in " + doc + "), product of:");
+                               
+                               System.Text.StringBuilder docFreqs = new System.Text.StringBuilder();
+                               System.Text.StringBuilder query = new System.Text.StringBuilder();
+                               query.Append('\"');
+                               docFreqs.Append(idfExp.Explain());
+                               for (int i = 0; i < Enclosing_Instance.terms.Count; i++)
+                               {
+                                       if (i != 0)
+                                       {
+                                               query.Append(" ");
+                                       }
+                                       
+                                       Term term = (Term) Enclosing_Instance.terms[i];
+                                       
+                                       query.Append(term.Text());
+                               }
+                               query.Append('\"');
+                               
+                               Explanation idfExpl = new Explanation(idf, "idf(" + Enclosing_Instance.field + ":" + docFreqs + ")");
+                               
+                               // explain query weight
+                               Explanation queryExpl = new Explanation();
+                               queryExpl.SetDescription("queryWeight(" + GetQuery() + "), product of:");
+                               
+                               Explanation boostExpl = new Explanation(Enclosing_Instance.GetBoost(), "boost");
+                               if (Enclosing_Instance.GetBoost() != 1.0f)
+                                       queryExpl.AddDetail(boostExpl);
+                               queryExpl.AddDetail(idfExpl);
+                               
+                               Explanation queryNormExpl = new Explanation(queryNorm, "queryNorm");
+                               queryExpl.AddDetail(queryNormExpl);
+                               
+                               queryExpl.SetValue(boostExpl.GetValue() * idfExpl.GetValue() * queryNormExpl.GetValue());
+                               
+                               result.AddDetail(queryExpl);
+                               
+                               // explain field weight
+                               Explanation fieldExpl = new Explanation();
+                               fieldExpl.SetDescription("fieldWeight(" + Enclosing_Instance.field + ":" + query + " in " + doc + "), product of:");
+                               
+                               Scorer scorer = Scorer(reader, true, false);
+                               if (scorer == null)
+                               {
+                                       return new Explanation(0.0f, "no matching docs");
+                               }
+                               Explanation tfExpl = scorer.Explain(doc);
+                               fieldExpl.AddDetail(tfExpl);
+                               fieldExpl.AddDetail(idfExpl);
+                               
+                               Explanation fieldNormExpl = new Explanation();
+                               byte[] fieldNorms = reader.Norms(Enclosing_Instance.field);
+                               float fieldNorm = fieldNorms != null?Similarity.DecodeNorm(fieldNorms[doc]):1.0f;
+                               fieldNormExpl.SetValue(fieldNorm);
+                               fieldNormExpl.SetDescription("fieldNorm(field=" + Enclosing_Instance.field + ", doc=" + doc + ")");
+                               fieldExpl.AddDetail(fieldNormExpl);
+                               
+                               fieldExpl.SetValue(tfExpl.GetValue() * idfExpl.GetValue() * fieldNormExpl.GetValue());
+                               
+                               result.AddDetail(fieldExpl);
+                               
+                               // combine them
+                               result.SetValue(queryExpl.GetValue() * fieldExpl.GetValue());
+                               
+                               if (queryExpl.GetValue() == 1.0f)
+                                       return fieldExpl;
+                               
+                               return result;
+                       }
+               }
+               
+               public override Weight CreateWeight(Searcher searcher)
+               {
+                       if (terms.Count == 1)
+                       {
+                               // optimize one-term case
+                               Term term = (Term) terms[0];
+                               Query termQuery = new TermQuery(term);
+                               termQuery.SetBoost(GetBoost());
+                               return termQuery.CreateWeight(searcher);
+                       }
+                       return new PhraseWeight(this, searcher);
+               }
+               
+               /// <seealso cref="Mono.Lucene.Net.Search.Query.ExtractTerms(java.util.Set)">
+               /// </seealso>
+               public override void  ExtractTerms(System.Collections.Hashtable queryTerms)
+               {
+                       SupportClass.CollectionsHelper.AddAllIfNotContains(queryTerms, terms);
+               }
+               
+               /// <summary>Prints a user-readable version of this query. </summary>
+               public override System.String ToString(System.String f)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       if (field != null && !field.Equals(f))
+                       {
+                               buffer.Append(field);
+                               buffer.Append(":");
+                       }
+                       
+                       buffer.Append("\"");
+                       System.String[] pieces = new System.String[maxPosition + 1];
+                       for (int i = 0; i < terms.Count; i++)
+                       {
+                               int pos = ((System.Int32) positions[i]);
+                               System.String s = pieces[pos];
+                               if (s == null)
+                               {
+                                       s = ((Term) terms[i]).Text();
+                               }
+                               else
+                               {
+                                       s = s + "|" + ((Term) terms[i]).Text();
+                               }
+                               pieces[pos] = s;
+                       }
+                       for (int i = 0; i < pieces.Length; i++)
+                       {
+                               if (i > 0)
+                               {
+                                       buffer.Append(' ');
+                               }
+                               System.String s = pieces[i];
+                               if (s == null)
+                               {
+                                       buffer.Append('?');
+                               }
+                               else
+                               {
+                                       buffer.Append(s);
+                               }
+                       }
+                       buffer.Append("\"");
+                       
+                       if (slop != 0)
+                       {
+                               buffer.Append("~");
+                               buffer.Append(slop);
+                       }
+                       
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       
+                       return buffer.ToString();
+               }
+               
+               /// <summary>Returns true iff <code>o</code> is equal to this. </summary>
+               public  override bool Equals(System.Object o)
+               {
+                       if (!(o is PhraseQuery))
+                               return false;
+                       PhraseQuery other = (PhraseQuery) o;
+                       return (this.GetBoost() == other.GetBoost()) && (this.slop == other.slop) && this.terms.Equals(other.terms) && this.positions.Equals(other.positions);
+               }
+               
+               /// <summary>Returns a hash code value for this object.</summary>
+               public override int GetHashCode()
+               {
+                       return BitConverter.ToInt32(BitConverter.GetBytes(GetBoost()), 0) ^ slop ^ terms.GetHashCode() ^ positions.GetHashCode();
+               }
+       }
+}
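
The two Add overloads and SetSlop above combine into the common usage patterns. A hedged sketch (field names and index contents are hypothetical, not from this commit):

    // 1) Proximity: slop is an edit distance over term moves, so permitting
    //    a transposition of two words needs a slop of at least 2.
    PhraseQuery near = new PhraseQuery();
    near.Add(new Term("content", "new"));
    near.Add(new Term("content", "york"));
    near.SetSlop(2); // also matches "york new"; slop 0 demands the exact phrase

    // 2) Gaps: if the stopword "of" was dropped at index time, leave its
    //    position empty instead of shifting the following term.
    PhraseQuery gapped = new PhraseQuery();
    gapped.Add(new Term("content", "statue"), 0);
    gapped.Add(new Term("content", "liberty"), 2); // position 1 left open
    // gapped.ToString("content") renders the hole as '?': "statue ? liberty"
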
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PhraseQueue.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PhraseQueue.cs
new file mode 100644 (file)
index 0000000..95bf302
--- /dev/null
@@ -0,0 +1,47 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using PriorityQueue = Mono.Lucene.Net.Util.PriorityQueue;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       sealed class PhraseQueue:PriorityQueue
+       {
+               internal PhraseQueue(int size)
+               {
+                       Initialize(size);
+               }
+               
+               public override bool LessThan(System.Object o1, System.Object o2)
+               {
+                       PhrasePositions pp1 = (PhrasePositions) o1;
+                       PhrasePositions pp2 = (PhrasePositions) o2;
+                       if (pp1.doc == pp2.doc)
+                               if (pp1.position == pp2.position)
+                                       // same doc and pp.position, so decide by actual term positions: 
+                                       // rely on pp.position == tp.position - offset. 
+                                       return pp1.offset < pp2.offset;
+                               else
+                                       return pp1.position < pp2.position;
+                       else
+                               return pp1.doc < pp2.doc;
+               }
+       }
+}
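
The three-level ordering in LessThan() is what lets a scorer treat the head of the queue as the most-lagging iterator. The same ordering written as a conventional comparison, a helper of our own rather than anything in the port:

    // Order by document first, then by phrase-relative position, with the
    // phrase offset as the final tie-breaker.
    static int ComparePhrasePositions(PhrasePositions a, PhrasePositions b)
    {
        if (a.doc != b.doc) return a.doc.CompareTo(b.doc);
        if (a.position != b.position) return a.position.CompareTo(b.position);
        return a.offset.CompareTo(b.offset);
    }
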
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PhraseScorer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PhraseScorer.cs
new file mode 100644 (file)
index 0000000..64ab445
--- /dev/null
@@ -0,0 +1,251 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TermPositions = Mono.Lucene.Net.Index.TermPositions;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Expert: Scoring functionality for phrase queries.
+       /// <br/>A document is considered matching if it contains the phrase-query terms  
+       /// at "valid" positions. What "valid positions" are
+       /// depends on the type of the phrase query: for an exact phrase query terms are required 
+       /// to appear in adjacent locations, while for a sloppy phrase query some distance between 
+       /// the terms is allowed. The abstract method {@link #PhraseFreq()} of extending classes
+       /// is invoked for each document containing all the phrase query terms, in order to 
+       /// compute the frequency of the phrase query in that document. A non-zero frequency
+       /// means a match. 
+       /// </summary>
+       abstract class PhraseScorer:Scorer
+       {
+               private Weight weight;
+               protected internal byte[] norms;
+               protected internal float value_Renamed;
+               
+               private bool firstTime = true;
+               private bool more = true;
+               protected internal PhraseQueue pq;
+               protected internal PhrasePositions first, last;
+               
+               private float freq; // phrase frequency in current doc as computed by PhraseFreq().
+               
+               internal PhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, Similarity similarity, byte[] norms):base(similarity)
+               {
+                       this.norms = norms;
+                       this.weight = weight;
+                       this.value_Renamed = weight.GetValue();
+                       
+                       // convert tps to a list of phrase positions.
+                       // note: phrase-position differs from term-position in that its position
+                       // reflects the phrase offset: pp.pos = tp.pos - offset.
+                       // this allows to easily identify a matching (exact) phrase 
+                       // when all PhrasePositions have exactly the same position.
+                       for (int i = 0; i < tps.Length; i++)
+                       {
+                               PhrasePositions pp = new PhrasePositions(tps[i], offsets[i]);
+                               if (last != null)
+                               {
+                                       // add next to end of list
+                                       last.next = pp;
+                               }
+                               else
+                               {
+                                       first = pp;
+                               }
+                               last = pp;
+                       }
+                       
+                       pq = new PhraseQueue(tps.Length); // construct empty pq
+                       first.doc = - 1;
+               }
+               
+               /// <deprecated> use {@link #DocID()} instead. 
+               /// </deprecated>
+        [Obsolete("use DocID() instead.")]
+               public override int Doc()
+               {
+                       return first.doc;
+               }
+               
+               public override int DocID()
+               {
+                       return first.doc;
+               }
+               
+               /// <deprecated> use {@link #NextDoc()} instead. 
+               /// </deprecated>
+        [Obsolete("use NextDoc() instead.")]
+               public override bool Next()
+               {
+                       return NextDoc() != NO_MORE_DOCS;
+               }
+               
+               public override int NextDoc()
+               {
+                       if (firstTime)
+                       {
+                               Init();
+                               firstTime = false;
+                       }
+                       else if (more)
+                       {
+                               more = last.Next(); // trigger further scanning
+                       }
+                       if (!DoNext())
+                       {
+                               first.doc = NO_MORE_DOCS;
+                       }
+                       return first.doc;
+               }
+               
+               // next without initial increment
+               private bool DoNext()
+               {
+                       while (more)
+                       {
+                               while (more && first.doc < last.doc)
+                               {
+                                       // find doc w/ all the terms
+                                       more = first.SkipTo(last.doc); // skip first upto last
+                                       FirstToLast(); // and move it to the end
+                               }
+                               
+                               if (more)
+                               {
+                                       // found a doc with all of the terms
+                                       freq = PhraseFreq(); // check for phrase
+                                       if (freq == 0.0f) // no match
+                                               more = last.Next(); // trigger further scanning
+                                       else
+                                               return true; // found a match
+                               }
+                       }
+                       return false; // no more matches
+               }
+               
+               public override float Score()
+               {
+                       //System.out.println("scoring " + first.doc);
+                       float raw = GetSimilarity().Tf(freq) * value_Renamed; // raw score
+                       return norms == null?raw:raw * Similarity.DecodeNorm(norms[first.doc]); // normalize
+               }
+               
+               /// <deprecated> use {@link #Advance(int)} instead. 
+               /// </deprecated>
+        [Obsolete("use Advance(int) instead.")]
+               public override bool SkipTo(int target)
+               {
+                       return Advance(target) != NO_MORE_DOCS;
+               }
+               
+               public override int Advance(int target)
+               {
+                       firstTime = false;
+                       for (PhrasePositions pp = first; more && pp != null; pp = pp.next)
+                       {
+                               more = pp.SkipTo(target);
+                       }
+                       if (more)
+                       {
+                               Sort(); // re-sort
+                       }
+                       if (!DoNext())
+                       {
+                               first.doc = NO_MORE_DOCS;
+                       }
+                       return first.doc;
+               }
+               
+               /// <summary> For a document containing all the phrase query terms, compute the
+               /// frequency of the phrase in that document. 
+               /// A non-zero frequency means a match.
+               /// <br/>Note that containing all phrase terms does not guarantee a match - they have to be found in matching locations.  
+               /// </summary>
+               /// <returns> frequency of the phrase in current doc, 0 if not found. 
+               /// </returns>
+               protected internal abstract float PhraseFreq();
+               
+               private void  Init()
+               {
+                       for (PhrasePositions pp = first; more && pp != null; pp = pp.next)
+                       {
+                               more = pp.Next();
+                       }
+                       if (more)
+                       {
+                               Sort();
+                       }
+               }
+               
+               private void  Sort()
+               {
+                       pq.Clear();
+                       for (PhrasePositions pp = first; pp != null; pp = pp.next)
+                       {
+                               pq.Add(pp);
+                       }
+                       PqToList();
+               }
+               
+               protected internal void  PqToList()
+               {
+                       last = first = null;
+                       while (pq.Top() != null)
+                       {
+                               PhrasePositions pp = (PhrasePositions) pq.Pop();
+                               if (last != null)
+                               {
+                                       // add next to end of list
+                                       last.next = pp;
+                               }
+                               else
+                                       first = pp;
+                               last = pp;
+                               pp.next = null;
+                       }
+               }
+               
+               protected internal void  FirstToLast()
+               {
+                       last.next = first; // move first to end of list
+                       last = first;
+                       first = first.next;
+                       last.next = null;
+               }
+               
+               public override Explanation Explain(int doc)
+               {
+                       Explanation tfExplanation = new Explanation();
+                       
+                       int d = Advance(doc);
+                       float phraseFreq = (d == doc)?freq:0.0f;
+                       tfExplanation.SetValue(GetSimilarity().Tf(phraseFreq));
+                       tfExplanation.SetDescription("tf(phraseFreq=" + phraseFreq + ")");
+                       
+                       return tfExplanation;
+               }
+               
+               public override System.String ToString()
+               {
+                       return "scorer(" + weight + ")";
+               }
+       }
+}
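
What a concrete PhraseFreq() has to establish can be shown in miniature. A self-contained toy for the exact-phrase case, assuming positions were already normalized to location - offset as described above; this is an illustration, not the port's ExactPhraseScorer:

    using System.Collections.Generic;

    static int ToyExactPhraseFreq(int[][] normalizedPositions)
    {
        // With normalized positions, every occurrence of the whole phrase
        // appears as a position value common to all terms, so the phrase
        // frequency is the size of the intersection.
        HashSet<int> common = new HashSet<int>(normalizedPositions[0]);
        for (int i = 1; i < normalizedPositions.Length; i++)
            common.IntersectWith(normalizedPositions[i]);
        return common.Count; // 0 means this document is not a match
    }
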
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PositiveScoresOnlyCollector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PositiveScoresOnlyCollector.cs
new file mode 100644 (file)
index 0000000..6ea077e
--- /dev/null
@@ -0,0 +1,66 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A {@link Collector} implementation which wraps another
+       /// {@link Collector} and makes sure only documents with
+       /// scores &gt; 0 are collected.
+       /// </summary>
+       public class PositiveScoresOnlyCollector:Collector
+       {
+               
+               private Collector c;
+               private Scorer scorer;
+               
+               public PositiveScoresOnlyCollector(Collector c)
+               {
+                       this.c = c;
+               }
+               
+               public override void  Collect(int doc)
+               {
+                       if (scorer.Score() > 0)
+                       {
+                               c.Collect(doc);
+                       }
+               }
+               
+               public override void  SetNextReader(IndexReader reader, int docBase)
+               {
+                       c.SetNextReader(reader, docBase);
+               }
+               
+               public override void  SetScorer(Scorer scorer)
+               {
+                       // Set a ScoreCachingWrappingScorer in case the wrapped Collector will call
+                       // score() also.
+                       this.scorer = new ScoreCachingWrappingScorer(scorer);
+                       c.SetScorer(this.scorer);
+               }
+               
+               public override bool AcceptsDocsOutOfOrder()
+               {
+                       return c.AcceptsDocsOutOfOrder();
+               }
+       }
+}
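
Usage is a plain decorator. A minimal sketch with a home-grown counting collector; only PositiveScoresOnlyCollector itself comes from the file above, the rest is ours:

    // A trivial collector that counts whatever reaches it.
    class CountingCollector : Collector
    {
        public int Count;
        public override void SetScorer(Scorer scorer) { }
        public override void Collect(int doc) { Count++; }
        public override void SetNextReader(IndexReader reader, int docBase) { }
        public override bool AcceptsDocsOutOfOrder() { return true; }
    }

    // Wrapping filters zero-score hits out before they are counted:
    //   CountingCollector counter = new CountingCollector();
    //   searcher.Search(query, new PositiveScoresOnlyCollector(counter));
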
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PrefixFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PrefixFilter.cs
new file mode 100644 (file)
index 0000000..e32baf3
--- /dev/null
@@ -0,0 +1,51 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Term = Mono.Lucene.Net.Index.Term;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A Filter that restricts search results to values that have a matching prefix in a given
+       /// field.
+       /// </summary>
+       [Serializable]
+       public class PrefixFilter:MultiTermQueryWrapperFilter
+       {
+               
+               public PrefixFilter(Term prefix):base(new PrefixQuery(prefix))
+               {
+               }
+               
+               public virtual Term GetPrefix()
+               {
+                       return ((PrefixQuery) query).GetPrefix();
+               }
+               
+               /// <summary>Prints a user-readable version of this query. </summary>
+               public override System.String ToString()
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       buffer.Append("PrefixFilter(");
+                       buffer.Append(GetPrefix().ToString());
+                       buffer.Append(")");
+                       return buffer.ToString();
+               }
+       }
+}
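
A short usage sketch, with a hypothetical field and prefix: the filter form is useful when the prefix restriction should constrain results without influencing scoring:

    // Restrict hits to documents whose "path" field starts with "mcs/",
    // while the score still comes from contentQuery alone.
    Filter pathFilter = new PrefixFilter(new Term("path", "mcs/"));
    // TopDocs hits = searcher.Search(contentQuery, pathFilter, 10);
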
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PrefixQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PrefixQuery.cs
new file mode 100644 (file)
index 0000000..fa25ae4
--- /dev/null
@@ -0,0 +1,100 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>A Query that matches documents containing terms with a specified prefix. A PrefixQuery
+       /// is built by QueryParser for input like <code>app*</code>.
+       /// 
+       /// <p/>This query uses the {@link
+       /// MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}
+       /// rewrite method. 
+       /// </summary>
+       [Serializable]
+       public class PrefixQuery:MultiTermQuery
+       {
+               private Term prefix;
+               
+               /// <summary>Constructs a query for terms starting with <code>prefix</code>. </summary>
+               public PrefixQuery(Term prefix):base(prefix)
+               { //will be removed in 3.0
+                       this.prefix = prefix;
+               }
+               
+               /// <summary>Returns the prefix of this query. </summary>
+               public virtual Term GetPrefix()
+               {
+                       return prefix;
+               }
+               
+               public /*protected internal*/ override FilteredTermEnum GetEnum(IndexReader reader)
+               {
+                       return new PrefixTermEnum(reader, prefix);
+               }
+               
+               /// <summary>Prints a user-readable version of this query. </summary>
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       if (!prefix.Field().Equals(field))
+                       {
+                               buffer.Append(prefix.Field());
+                               buffer.Append(":");
+                       }
+                       buffer.Append(prefix.Text());
+                       buffer.Append('*');
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       return buffer.ToString();
+               }
+               
+               //@Override
+               public override int GetHashCode()
+               {
+                       int prime = 31;
+                       int result = base.GetHashCode();
+                       result = prime * result + ((prefix == null)?0:prefix.GetHashCode());
+                       return result;
+               }
+               
+               //@Override
+               public  override bool Equals(System.Object obj)
+               {
+                       if (this == obj)
+                               return true;
+                       if (!base.Equals(obj))
+                               return false;
+                       if (GetType() != obj.GetType())
+                               return false;
+                       PrefixQuery other = (PrefixQuery) obj;
+                       if (prefix == null)
+                       {
+                               if (other.prefix != null)
+                                       return false;
+                       }
+                       else if (!prefix.Equals(other.prefix))
+                               return false;
+                       return true;
+               }
+       }
+}
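
Programmatically this is the query the parser builds for app*. A sketch with a hypothetical field name:

    // Matches every document containing a "name" term that starts with
    // "app": "app", "apple", "apply", ...
    Query q = new PrefixQuery(new Term("name", "app"));
    // q.ToString("name") prints: app*
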
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PrefixTermEnum.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/PrefixTermEnum.cs
new file mode 100644 (file)
index 0000000..58d58a0
--- /dev/null
@@ -0,0 +1,71 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Subclass of FilteredTermEnum for enumerating all terms that match the
+       /// specified prefix filter term.
+       /// <p/>
+       /// Term enumerations are always ordered by Term.compareTo().  Each term in
+       /// the enumeration is greater than all that precede it.
+       /// 
+       /// </summary>
+       public class PrefixTermEnum:FilteredTermEnum
+       {
+               
+               private Term prefix;
+               private bool endEnum = false;
+               
+               public PrefixTermEnum(IndexReader reader, Term prefix)
+               {
+                       this.prefix = prefix;
+                       
+                       SetEnum(reader.Terms(new Term(prefix.Field(), prefix.Text())));
+               }
+               
+               public override float Difference()
+               {
+                       return 1.0f;
+               }
+               
+               public override bool EndEnum()
+               {
+                       return endEnum;
+               }
+               
+               protected internal virtual Term GetPrefixTerm()
+               {
+                       return prefix;
+               }
+               
+               public /*protected internal*/ override bool TermCompare(Term term)
+               {
+                       if ((System.Object) term.Field() == (System.Object) prefix.Field() && term.Text().StartsWith(prefix.Text()))
+                       {
+                               return true;
+                       }
+                       endEnum = true;
+                       return false;
+               }
+       }
+}
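
TermCompare() may set endEnum on the first failure because term enumerations are ordered, so once one term stops matching the prefix no later term can match either. In miniature:

    // Toy walk over an ordered term list for the prefix "app": scanning can
    // stop at the first non-match, which is exactly what endEnum signals.
    string[] ordered = { "app", "apple", "apply", "apricot" };
    int matches = 0;
    foreach (string t in ordered)
    {
        if (!t.StartsWith("app"))
            break; // "apricot" ends the enumeration
        matches++; // counts 3 matching terms
    }
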
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Query.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Query.cs
new file mode 100644 (file)
index 0000000..33fc176
--- /dev/null
@@ -0,0 +1,269 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>The abstract base class for queries.
+       /// <p/>Instantiable subclasses are:
+       /// <ul>
+       /// <li> {@link TermQuery} </li>
+    /// <li> {@link MultiTermQuery} </li>
+    /// <li> {@link BooleanQuery} </li>
+    /// <li> {@link WildcardQuery} </li>
+    /// <li> {@link PhraseQuery} </li>
+    /// <li> {@link PrefixQuery} </li>
+    /// <li> {@link MultiPhraseQuery} </li>
+    /// <li> {@link FuzzyQuery} </li>
+    /// <li> {@link TermRangeQuery} </li>
+    /// <li> {@link NumericRangeQuery} </li>
+    /// <li> {@link Mono.Lucene.Net.Search.Spans.SpanQuery} </li>
+       /// </ul>
+       /// <p/>A parser for queries is contained in:
+       /// <ul>
+    /// <li>{@link Mono.Lucene.Net.QueryParsers.QueryParser QueryParser} </li>
+       /// </ul>
+       /// </summary>
+       [Serializable]
+       public abstract class Query : System.ICloneable
+       {
+               private float boost = 1.0f; // query boost factor
+               
+               /// <summary>Sets the boost for this query clause to <code>b</code>.  Documents
+               /// matching this clause will (in addition to the normal weightings) have
+               /// their score multiplied by <code>b</code>.
+               /// </summary>
+               public virtual void  SetBoost(float b)
+               {
+                       boost = b;
+               }
+               
+               /// <summary>Gets the boost for this clause.  Documents matching
+               /// this clause will (in addition to the normal weightings) have their score
+               /// multiplied by <code>b</code>.   The boost is 1.0 by default.
+               /// </summary>
+               public virtual float GetBoost()
+               {
+                       return boost;
+               }
+               
+               /// <summary>Prints a query to a string, with <code>field</code> assumed to be the 
+               /// default field and omitted.
+               /// <p/>The representation used is one that is supposed to be readable
+               /// by {@link Mono.Lucene.Net.QueryParsers.QueryParser QueryParser}. However,
+               /// there are the following limitations:
+               /// <ul>
+               /// <li>If the query was created by the parser, the printed
+               /// representation may not be exactly what was parsed. For example,
+               /// characters that need to be escaped will be represented without
+               /// the required backslash.</li>
+               /// <li>Some of the more complicated queries (e.g. span queries)
+               /// don't have a representation that can be parsed by QueryParser.</li>
+               /// </ul>
+               /// </summary>
+               public abstract System.String ToString(System.String field);
+               
+               /// <summary>Prints a query to a string. </summary>
+               public override System.String ToString()
+               {
+                       return ToString("");
+               }
+               
+               /// <summary> Expert: Constructs an appropriate Weight implementation for this query.
+               /// 
+               /// <p/>
+               /// Only implemented by primitive queries, which re-write to themselves.
+               /// </summary>
+               public virtual Weight CreateWeight(Searcher searcher)
+               {
+                       throw new System.NotSupportedException();
+               }
+               
+               /// <summary> Expert: Constructs and initializes a Weight for a top-level query.</summary>
+               public virtual Weight Weight(Searcher searcher)
+               {
+                       Query query = searcher.Rewrite(this);
+                       Weight weight = query.CreateWeight(searcher);
+                       float sum = weight.SumOfSquaredWeights();
+                       float norm = GetSimilarity(searcher).QueryNorm(sum);
+            if (float.IsInfinity(norm) || float.IsNaN(norm))
+                norm = 1.0f;
+                       weight.Normalize(norm);
+                       return weight;
+               }
+               
+               
+               /// <summary>Expert: called to re-write queries into primitive queries. For example,
+               /// a PrefixQuery will be rewritten into a BooleanQuery that consists
+               /// of TermQuerys.
+               /// </summary>
+               public virtual Query Rewrite(IndexReader reader)
+               {
+                       return this;
+               }
+               
+               
+               /// <summary>Expert: called when re-writing queries under MultiSearcher.
+               /// 
+               /// Create a single query suitable for use by all subsearchers (in 1-1
+               /// correspondence with queries). This is an optimization of the OR of
+               /// all queries. We handle the common optimization cases of equal
+               /// queries and overlapping clauses of boolean OR queries (as generated
+               /// by MultiTermQuery.rewrite()).
+               /// Be careful overriding this method as queries[0] determines which
+               /// method will be called and is not necessarily of the same type as
+               /// the other queries.
+               /// </summary>
+               public virtual Query Combine(Query[] queries)
+               {
+            System.Collections.Hashtable uniques = new System.Collections.Hashtable();
+                       for (int i = 0; i < queries.Length; i++)
+                       {
+                               Query query = queries[i];
+                               BooleanClause[] clauses = null;
+                               // check if we can split the query into clauses
+                               bool splittable = (query is BooleanQuery);
+                               if (splittable)
+                               {
+                                       BooleanQuery bq = (BooleanQuery) query;
+                                       splittable = bq.IsCoordDisabled();
+                                       clauses = bq.GetClauses();
+                                       for (int j = 0; splittable && j < clauses.Length; j++)
+                                       {
+                                               splittable = (clauses[j].GetOccur() == BooleanClause.Occur.SHOULD);
+                                       }
+                               }
+                               if (splittable)
+                               {
+                                       for (int j = 0; j < clauses.Length; j++)
+                                       {
+                                               SupportClass.CollectionsHelper.AddIfNotContains(uniques, clauses[j].GetQuery());
+                                       }
+                               }
+                               else
+                               {
+                                       SupportClass.CollectionsHelper.AddIfNotContains(uniques, query);
+                               }
+                       }
+                       // optimization: if we have just one query, just return it
+                       if (uniques.Count == 1)
+                       {
+                foreach (object key in uniques.Keys)
+                {
+                    return (Query) key;
+                }
+                       }
+                       BooleanQuery result = new BooleanQuery(true);
+            foreach (object key in uniques.Keys)
+            {
+                result.Add((Query) key, BooleanClause.Occur.SHOULD);
+            }
+                       return result;
+               }
+               
+               
+               /// <summary> Expert: adds all terms occurring in this query to the terms set. Only
+               /// works if this query is in its {@link #rewrite rewritten} form.
+               /// 
+               /// </summary>
+               /// <throws>  NotSupportedException if this query is not yet rewritten </throws>
+               public virtual void  ExtractTerms(System.Collections.Hashtable terms)
+               {
+                       // needs to be implemented by query subclasses
+                       throw new System.NotSupportedException();
+               }
+               
+               
+               
+               /// <summary>Expert: merges the clauses of a set of BooleanQuery's into a single
+               /// BooleanQuery.
+               /// 
+               /// <p/>A utility for use by {@link #Combine(Query[])} implementations.
+               /// </summary>
+               public static Query MergeBooleanQueries(BooleanQuery[] queries)
+               {
+            System.Collections.Hashtable allClauses = new System.Collections.Hashtable();
+                       for (int i = 0; i < queries.Length; i++)
+                       {
+                               BooleanClause[] clauses = queries[i].GetClauses();
+                               for (int j = 0; j < clauses.Length; j++)
+                               {
+                                       SupportClass.CollectionsHelper.AddIfNotContains(allClauses, clauses[j]);
+                               }
+                       }
+                       
+                       bool coordDisabled = queries.Length == 0?false:queries[0].IsCoordDisabled();
+                       BooleanQuery result = new BooleanQuery(coordDisabled);
+                       System.Collections.IEnumerator i2 = allClauses.GetEnumerator();
+                       while (i2.MoveNext())
+                       {
+                               result.Add((BooleanClause) i2.Current);
+                       }
+                       return result;
+               }
+               
+               
+               /// <summary>Expert: Returns the Similarity implementation to be used for this query.
+               /// Subclasses may override this method to specify their own Similarity
+               /// implementation, perhaps one that delegates through that of the Searcher.
+               /// By default the Searcher's Similarity implementation is returned.
+               /// </summary>
+               public virtual Similarity GetSimilarity(Searcher searcher)
+               {
+                       return searcher.GetSimilarity();
+               }
+               
+               /// <summary>Returns a clone of this query. </summary>
+               public virtual System.Object Clone()
+               {
+                       try
+                       {
+                               return base.MemberwiseClone();
+                       }
+                       catch (System.Exception e)
+                       {
+                               throw new System.SystemException("Clone not supported: " + e.Message);
+                       }
+               }
+               
+               public override int GetHashCode()
+               {
+                       int prime = 31;
+                       int result = 1;
+                       result = prime * result + BitConverter.ToInt32(BitConverter.GetBytes(boost), 0);
+                       return result;
+               }
+               
+               public  override bool Equals(System.Object obj)
+               {
+                       if (this == obj)
+                               return true;
+                       if (obj == null)
+                               return false;
+                       if (GetType() != obj.GetType())
+                               return false;
+                       Query other = (Query) obj;
+                       if (BitConverter.ToInt32(BitConverter.GetBytes(boost), 0) != BitConverter.ToInt32(BitConverter.GetBytes(other.boost), 0))
+                               return false;
+                       return true;
+               }
+       }
+}
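
A quick sketch of the boost API in use; TermQuery and Term come from elsewhere in this port, and the field and term are made up:

    Query q = new TermQuery(new Term("title", "lucene"));
    q.SetBoost(2.0f);                               // matches on this clause now score 2x
    System.Console.WriteLine(q.GetBoost());         // prints 2
    System.Console.WriteLine(q.ToString("title"));  // parser-readable form, default field omitted
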
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/QueryFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/QueryFilter.cs
new file mode 100644 (file)
index 0000000..965a1c3
--- /dev/null
@@ -0,0 +1,55 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       
+       /// <summary>Constrains search results to only match those which also match a provided
+       /// query.  Results are cached, so that searches after the first on the same
+       /// index using this filter are much faster.
+       /// 
+       /// </summary>
+       /// <version>  $Id: QueryFilter.java 528298 2007-04-13 00:59:28Z hossman $
+       /// </version>
+       /// <deprecated> use a CachingWrapperFilter with QueryWrapperFilter
+       /// </deprecated>
+    [Obsolete("use a CachingWrapperFilter with QueryWrapperFilter")]
+       [Serializable]
+       public class QueryFilter:CachingWrapperFilter
+       {
+               
+               /// <summary>Constructs a filter which only matches documents matching
+               /// <code>query</code>.
+               /// </summary>
+               public QueryFilter(Query query):base(new QueryWrapperFilter(query))
+               {
+               }
+               
+               public  override bool Equals(System.Object o)
+               {
+                       // guard the comparison so a non-QueryFilter argument compares unequal instead of throwing
+                       return o is QueryFilter && base.Equals(o);
+               }
+               
+               public override int GetHashCode()
+               {
+                       return base.GetHashCode() ^ unchecked((int) 0x923F64B9);
+               }
+       }
+}
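
As the Obsolete message above says, this class is now just shorthand; a minimal sketch of the two equivalent spellings, assuming a query q built elsewhere:

    Filter f1 = new QueryFilter(q);                                          // deprecated
    Filter f2 = new CachingWrapperFilter(new QueryWrapperFilter(q));         // preferred
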
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/QueryTermVector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/QueryTermVector.cs
new file mode 100644 (file)
index 0000000..d611d30
--- /dev/null
@@ -0,0 +1,167 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Analyzer = Mono.Lucene.Net.Analysis.Analyzer;
+using TokenStream = Mono.Lucene.Net.Analysis.TokenStream;
+using TermAttribute = Mono.Lucene.Net.Analysis.Tokenattributes.TermAttribute;
+using TermFreqVector = Mono.Lucene.Net.Index.TermFreqVector;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Takes the terms of a query and exposes them as a
+       /// {@link TermFreqVector}: each unique term together with the number
+       /// of times it occurred in the query.
+       /// </summary>
+       public class QueryTermVector : TermFreqVector
+       {
+               private System.String[] terms = new System.String[0];
+               private int[] termFreqs = new int[0];
+               
+               public virtual System.String GetField()
+               {
+                       // a query term vector is not tied to any document field
+                       return null;
+               }
+               
+               /// <summary> Constructs a QueryTermVector from a raw list of query terms. </summary>
+               /// <param name="queryTerms">The original list of terms from the query, can contain duplicates
+               /// </param>
+               public QueryTermVector(System.String[] queryTerms)
+               {
+                       ProcessTerms(queryTerms);
+               }
+               
+               public QueryTermVector(System.String queryString, Analyzer analyzer)
+               {
+                       if (analyzer != null)
+                       {
+                               TokenStream stream = analyzer.TokenStream("", new System.IO.StringReader(queryString));
+                               if (stream != null)
+                               {
+                                       System.Collections.ArrayList terms = new System.Collections.ArrayList();
+                                       try
+                                       {
+                                               bool hasMoreTokens = false;
+                                               
+                                               stream.Reset();
+                                               TermAttribute termAtt = (TermAttribute) stream.AddAttribute(typeof(TermAttribute));
+                                               
+                                               hasMoreTokens = stream.IncrementToken();
+                                               while (hasMoreTokens)
+                                               {
+                                                       terms.Add(termAtt.Term());
+                                                       hasMoreTokens = stream.IncrementToken();
+                                               }
+                                               ProcessTerms((System.String[]) terms.ToArray(typeof(System.String)));
+                                       }
+                                       catch (System.IO.IOException)
+                                       {
+                                               // analysis failed; leave the term vector empty
+                                       }
+                               }
+                       }
+               }
+               
+               private void  ProcessTerms(System.String[] queryTerms)
+               {
+                       if (queryTerms != null)
+                       {
+                               System.Array.Sort(queryTerms);
+                               System.Collections.IDictionary tmpSet = new System.Collections.Hashtable(queryTerms.Length);
+                               //filter out duplicates
+                               System.Collections.ArrayList tmpList = new System.Collections.ArrayList(queryTerms.Length);
+                               System.Collections.ArrayList tmpFreqs = new System.Collections.ArrayList(queryTerms.Length);
+                               int j = 0;
+                               for (int i = 0; i < queryTerms.Length; i++)
+                               {
+                                       System.String term = queryTerms[i];
+                                       System.Object temp_position = tmpSet[term];
+                                       if (temp_position == null)
+                                       {
+                                               tmpSet[term] = (System.Int32) j++;
+                                               tmpList.Add(term);
+                                               tmpFreqs.Add(1);
+                                       }
+                                       else
+                                       {
+                        System.Int32 position = (System.Int32) tmpSet[term];
+                                               System.Int32 integer = (System.Int32) tmpFreqs[position];
+                                               tmpFreqs[position] = (System.Int32) (integer + 1);
+                                       }
+                               }
+                terms = (System.String[]) tmpList.ToArray(typeof(System.String));
+                               //termFreqs = (int[])tmpFreqs.toArray(termFreqs);
+                               termFreqs = new int[tmpFreqs.Count];
+                               int i2 = 0;
+                               for (System.Collections.IEnumerator iter = tmpFreqs.GetEnumerator(); iter.MoveNext(); )
+                               {
+                                       System.Int32 integer = (System.Int32) iter.Current;
+                                       termFreqs[i2++] = integer;
+                               }
+                       }
+               }
+               
+               public override System.String ToString()
+               {
+                       System.Text.StringBuilder sb = new System.Text.StringBuilder();
+                       sb.Append('{');
+                       for (int i = 0; i < terms.Length; i++)
+                       {
+                               if (i > 0)
+                                       sb.Append(", ");
+                               sb.Append(terms[i]).Append('/').Append(termFreqs[i]);
+                       }
+                       sb.Append('}');
+                       return sb.ToString();
+               }
+               
+               
+               public virtual int Size()
+               {
+                       return terms.Length;
+               }
+               
+               public virtual System.String[] GetTerms()
+               {
+                       return terms;
+               }
+               
+               public virtual int[] GetTermFrequencies()
+               {
+                       return termFreqs;
+               }
+               
+               public virtual int IndexOf(System.String term)
+               {
+                       int res = System.Array.BinarySearch(terms, term);
+                       return res >= 0?res:- 1;
+               }
+               
+               public virtual int[] IndexesOf(System.String[] terms, int start, int len)
+               {
+                       int[] res = new int[len];
+                       
+                       for (int i = 0; i < len; i++)
+                       {
+                               res[i] = IndexOf(terms[i]);
+                       }
+                       return res;
+               }
+       }
+}
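
A small illustration of what ProcessTerms does with duplicates (input terms made up):

    QueryTermVector v = new QueryTermVector(new System.String[] { "b", "a", "b" });
    // terms are sorted and deduplicated, frequencies counted:
    //   v.GetTerms()           -> { "a", "b" }
    //   v.GetTermFrequencies() -> { 1, 2 }
    //   v.IndexOf("b")         -> 1; unknown terms return -1
    System.Console.WriteLine(v);  // {a/1, b/2}
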
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/QueryWrapperFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/QueryWrapperFilter.cs
new file mode 100644 (file)
index 0000000..d6dcd21
--- /dev/null
@@ -0,0 +1,159 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Constrains search results to only match those which also match a provided
+       /// query.  
+       /// 
+       /// <p/> This could be used, for example, with a {@link TermRangeQuery} on a suitably
+       /// formatted date field to implement date filtering.  One could re-use a single
+       /// QueryWrapperFilter that matches, e.g., only documents modified within the last
+       /// week.  The filter and TermRangeQuery would only need to be reconstructed
+       /// once per day.
+       /// 
+       /// </summary>
+       /// <version>  $Id:$
+       /// </version>
+       [Serializable]
+       public class QueryWrapperFilter:Filter
+       {
+               private class AnonymousClassCollector:Collector
+               {
+                       public AnonymousClassCollector(System.Collections.BitArray bits, QueryWrapperFilter enclosingInstance)
+                       {
+                               InitBlock(bits, enclosingInstance);
+                       }
+                       private void  InitBlock(System.Collections.BitArray bits, QueryWrapperFilter enclosingInstance)
+                       {
+                               this.bits = bits;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private System.Collections.BitArray bits;
+                       private QueryWrapperFilter enclosingInstance;
+                       public QueryWrapperFilter Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private int base_Renamed = 0;
+                       public override void  SetScorer(Scorer scorer)
+                       {
+                               // score is not needed by this collector 
+                       }
+                       public override void  Collect(int doc)
+                       {
+                // grow the bit array in 64-bit chunks until it can hold this doc
+                while (doc + base_Renamed >= bits.Length)
+                {
+                    bits.Length += 64;
+                }
+                bits.Set(doc + base_Renamed, true); // set bit for hit
+                       }
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               base_Renamed = docBase;
+                       }
+                       public override bool AcceptsDocsOutOfOrder()
+                       {
+                               return true;
+                       }
+               }
+               private class AnonymousClassDocIdSet:DocIdSet
+               {
+                       public AnonymousClassDocIdSet(Mono.Lucene.Net.Search.Weight weight, Mono.Lucene.Net.Index.IndexReader reader, QueryWrapperFilter enclosingInstance)
+                       {
+                               InitBlock(weight, reader, enclosingInstance);
+                       }
+                       private void  InitBlock(Mono.Lucene.Net.Search.Weight weight, Mono.Lucene.Net.Index.IndexReader reader, QueryWrapperFilter enclosingInstance)
+                       {
+                               this.weight = weight;
+                               this.reader = reader;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private Mono.Lucene.Net.Search.Weight weight;
+                       private Mono.Lucene.Net.Index.IndexReader reader;
+                       private QueryWrapperFilter enclosingInstance;
+                       public QueryWrapperFilter Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       public override DocIdSetIterator Iterator()
+                       {
+                               return weight.Scorer(reader, true, false);
+                       }
+                       public override bool IsCacheable()
+                       {
+                               return false;
+                       }
+               }
+               private Query query;
+               
+               /// <summary>Constructs a filter which only matches documents matching
+               /// <code>query</code>.
+               /// </summary>
+               public QueryWrapperFilter(Query query)
+               {
+                       this.query = query;
+               }
+               
+               /// <deprecated> Use {@link #GetDocIdSet(IndexReader)} instead.
+               /// </deprecated>
+        [Obsolete("Use GetDocIdSet(IndexReader) instead.")]
+               public override System.Collections.BitArray Bits(IndexReader reader)
+               {
+                       // round the capacity up to a multiple of 64 bits
+                       int capacity = (reader.MaxDoc() % 64 == 0 ? reader.MaxDoc() / 64 : reader.MaxDoc() / 64 + 1) * 64;
+                       System.Collections.BitArray bits = new System.Collections.BitArray(capacity);
+                       
+                       new IndexSearcher(reader).Search(query, new AnonymousClassCollector(bits, this));
+                       return bits;
+               }
+               
+               public override DocIdSet GetDocIdSet(IndexReader reader)
+               {
+                       Weight weight = query.Weight(new IndexSearcher(reader));
+                       return new AnonymousClassDocIdSet(weight, reader, this);
+               }
+               
+               public override System.String ToString()
+               {
+                       return "QueryWrapperFilter(" + query + ")";
+               }
+               
+               public  override bool Equals(System.Object o)
+               {
+                       if (!(o is QueryWrapperFilter))
+                               return false;
+                       return this.query.Equals(((QueryWrapperFilter) o).query);
+               }
+               
+               public override int GetHashCode()
+               {
+                       return query.GetHashCode() ^ unchecked((int) 0x923F64B9);
+               }
+       }
+}
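
The date-filtering idea from the class comment, sketched with an assumed "modified" field that stores dates as zero-padded yyyyMMdd strings:

    // string order equals date order for zero-padded yyyyMMdd terms
    Query lastWeek = new TermRangeQuery("modified", "20121116", "20121123", true, true);
    Filter f = new CachingWrapperFilter(new QueryWrapperFilter(lastWeek));
    // rebuild the query and filter once per day; the cache handles repeat searches
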
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/RangeFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/RangeFilter.cs
new file mode 100644 (file)
index 0000000..00e2113
--- /dev/null
@@ -0,0 +1,103 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A Filter that restricts search results to a range of values in a given
+       /// field.
+       /// 
+       /// <p/>This filter matches the documents looking for terms that fall into the
+       /// supplied range according to {@link String#compareTo(String)}. It is not intended
+       /// for numerical ranges, use {@link NumericRangeFilter} instead.
+       /// 
+       /// <p/>If you construct a large number of range filters with different ranges but on the 
+       /// same field, {@link FieldCacheRangeFilter} may have significantly better performance. 
+       /// 
+       /// </summary>
+       /// <deprecated> Use {@link TermRangeFilter} for term ranges or
+       /// {@link NumericRangeFilter} for numeric ranges instead.
+       /// This class will be removed in Lucene 3.0.
+       /// </deprecated>
+    [Obsolete("Use TermRangeFilter for term ranges or NumericRangeFilter for numeric ranges instead. This class will be removed in Lucene 3.0")]
+       [Serializable]
+       public class RangeFilter:MultiTermQueryWrapperFilter
+       {
+               
+               /// <param name="fieldName">The field this range applies to
+               /// </param>
+               /// <param name="lowerTerm">The lower bound on this range
+               /// </param>
+               /// <param name="upperTerm">The upper bound on this range
+               /// </param>
+               /// <param name="includeLower">Does this range include the lower bound?
+               /// </param>
+               /// <param name="includeUpper">Does this range include the upper bound?
+               /// </param>
+               /// <throws>  ArgumentException if both terms are null, or if lowerTerm is null
+               /// and includeLower is true (and similarly for upperTerm and includeUpper)
+               /// </throws>
+               public RangeFilter(System.String fieldName, System.String lowerTerm, System.String upperTerm, bool includeLower, bool includeUpper):base(new TermRangeQuery(fieldName, lowerTerm, upperTerm, includeLower, includeUpper))
+               {
+               }
+               
+               /// <summary> <strong>WARNING:</strong> Using this constructor and supplying a non-null
+               /// value in the <code>collator</code> parameter will cause every single 
+               /// index Term in the Field referenced by lowerTerm and/or upperTerm to be
+               /// examined.  Depending on the number of index Terms in this Field, the 
+               /// operation could be very slow.
+               /// 
+               /// </summary>
+               /// <param name="lowerTerm">The lower bound on this range
+               /// </param>
+               /// <param name="upperTerm">The upper bound on this range
+               /// </param>
+               /// <param name="includeLower">Does this range include the lower bound?
+               /// </param>
+               /// <param name="includeUpper">Does this range include the upper bound?
+               /// </param>
+               /// <param name="collator">The collator to use when determining range inclusion; set
+               /// to null to use Unicode code point ordering instead of collation.
+               /// </param>
+               /// <throws>  ArgumentException if both terms are null, or if lowerTerm is null
+               /// and includeLower is true (and similarly for upperTerm and includeUpper)
+               /// </throws>
+               public RangeFilter(System.String fieldName, System.String lowerTerm, System.String upperTerm, bool includeLower, bool includeUpper, System.Globalization.CompareInfo collator):base(new TermRangeQuery(fieldName, lowerTerm, upperTerm, includeLower, includeUpper, collator))
+               {
+               }
+               
+               /// <summary> Constructs a filter for field <code>fieldName</code> matching
+               /// less than or equal to <code>upperTerm</code>.
+               /// </summary>
+               public static RangeFilter Less(System.String fieldName, System.String upperTerm)
+               {
+                       return new RangeFilter(fieldName, null, upperTerm, false, true);
+               }
+               
+               /// <summary> Constructs a filter for field <code>fieldName</code> matching
+               /// greater than or equal to <code>lowerTerm</code>.
+               /// </summary>
+               public static RangeFilter More(System.String fieldName, System.String lowerTerm)
+               {
+                       return new RangeFilter(fieldName, lowerTerm, null, true, false);
+               }
+       }
+}
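
The Less/More factories above are shorthand for half-open ranges; for example (field and bound made up, string-ordered per the class comment):

    RangeFilter f1 = RangeFilter.Less("author", "m");
    // identical to the full constructor with no lower bound:
    RangeFilter f2 = new RangeFilter("author", null, "m", false, true);
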
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/RangeQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/RangeQuery.cs
new file mode 100644 (file)
index 0000000..d560a16
--- /dev/null
@@ -0,0 +1,172 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A Query that matches documents within an exclusive range of terms.
+       /// 
+       /// <p/>This query matches the documents looking for terms that fall into the
+       /// supplied range according to {@link Term#CompareTo(Term)}. It is not intended
+       /// for numerical ranges, use {@link NumericRangeQuery} instead.
+       /// 
+       /// <p/>This query uses {@linkplain
+       /// MultiTermQuery#SCORING_BOOLEAN_QUERY_REWRITE}.  If you
+       /// want to change this, use the new {@link TermRangeQuery}
+       /// instead.
+       /// 
+       /// </summary>
+       /// <deprecated> Use {@link TermRangeQuery} for term ranges or
+       /// {@link NumericRangeQuery} for numeric ranges instead.
+       /// This class will be removed in Lucene 3.0.
+       /// </deprecated>
+    [Obsolete("Use TermRangeQuery for term ranges or NumericRangeQuery for numeric ranges instead. This class will be removed in Lucene 3.0")]
+       [Serializable]
+       public class RangeQuery:Query
+       {
+               private TermRangeQuery delegate_Renamed;
+               
+               /// <summary>Constructs a query selecting all terms greater than
+               /// <code>lowerTerm</code> but less than <code>upperTerm</code>.
+               /// There must be at least one term and either term may be null,
+               /// in which case there is no bound on that side, but if there are
+               /// two terms, both terms <b>must</b> be for the same field.
+               /// 
+               /// </summary>
+               /// <param name="lowerTerm">The Term at the lower end of the range
+               /// </param>
+               /// <param name="upperTerm">The Term at the upper end of the range
+               /// </param>
+               /// <param name="inclusive">If true, both <code>lowerTerm</code> and
+               /// <code>upperTerm</code> will themselves be included in the range.
+               /// </param>
+               public RangeQuery(Term lowerTerm, Term upperTerm, bool inclusive):this(lowerTerm, upperTerm, inclusive, null)
+               {
+               }
+               
+               /// <summary>Constructs a query selecting all terms greater than
+               /// <code>lowerTerm</code> but less than <code>upperTerm</code>.
+               /// There must be at least one term and either term may be null,
+               /// in which case there is no bound on that side, but if there are
+               /// two terms, both terms <b>must</b> be for the same field.
+               /// <p/>
+               /// If <code>collator</code> is not null, it will be used to decide whether
+               /// index terms are within the given range, rather than using the Unicode code
+               /// point order in which index terms are stored.
+               /// <p/>
+               /// <strong>WARNING:</strong> Using this constructor and supplying a non-null
+               /// value in the <code>collator</code> parameter will cause every single 
+               /// index Term in the Field referenced by lowerTerm and/or upperTerm to be
+               /// examined.  Depending on the number of index Terms in this Field, the 
+               /// operation could be very slow.
+               /// 
+               /// </summary>
+               /// <param name="lowerTerm">The Term at the lower end of the range
+               /// </param>
+               /// <param name="upperTerm">The Term at the upper end of the range
+               /// </param>
+               /// <param name="inclusive">If true, both <code>lowerTerm</code> and
+               /// <code>upperTerm</code> will themselves be included in the range.
+               /// </param>
+               /// <param name="collator">The collator to use to collate index Terms, to determine
+               /// their membership in the range bounded by <code>lowerTerm</code> and
+               /// <code>upperTerm</code>.
+               /// </param>
+               public RangeQuery(Term lowerTerm, Term upperTerm, bool inclusive, System.Globalization.CompareInfo collator)
+               {
+                       if (lowerTerm == null && upperTerm == null)
+                               throw new System.ArgumentException("At least one term must be non-null");
+                       if (lowerTerm != null && upperTerm != null && (System.Object) lowerTerm.Field() != (System.Object) upperTerm.Field())
+                               throw new System.ArgumentException("Both terms must have the same field");
+                       
+                       delegate_Renamed = new TermRangeQuery((lowerTerm == null)?upperTerm.Field():lowerTerm.Field(), (lowerTerm == null)?null:lowerTerm.Text(), (upperTerm == null)?null:upperTerm.Text(), inclusive, inclusive, collator);
+                       delegate_Renamed.SetRewriteMethod(TermRangeQuery.SCORING_BOOLEAN_QUERY_REWRITE);
+               }
+               
+               public override void  SetBoost(float b)
+               {
+                       base.SetBoost(b);
+                       delegate_Renamed.SetBoost(b);
+               }
+               
+               public override Query Rewrite(IndexReader reader)
+               {
+                       return delegate_Renamed.Rewrite(reader);
+               }
+               
+               /// <summary>Returns the field name for this query </summary>
+               public virtual System.String GetField()
+               {
+                       return delegate_Renamed.GetField();
+               }
+               
+               /// <summary>Returns the lower term of this range query. </summary>
+               public virtual Term GetLowerTerm()
+               {
+                       System.String term = delegate_Renamed.GetLowerTerm();
+                       return (term == null)?null:new Term(GetField(), term);
+               }
+               
+               /// <summary>Returns the upper term of this range query. </summary>
+               public virtual Term GetUpperTerm()
+               {
+                       System.String term = delegate_Renamed.GetUpperTerm();
+                       return (term == null)?null:new Term(GetField(), term);
+               }
+               
+               /// <summary>Returns <code>true</code> if the range query is inclusive </summary>
+               public virtual bool IsInclusive()
+               {
+                       return delegate_Renamed.IncludesLower() && delegate_Renamed.IncludesUpper();
+               }
+               
+               /// <summary>Returns the collator used to determine range inclusion, if any. </summary>
+               public virtual System.Globalization.CompareInfo GetCollator()
+               {
+                       return delegate_Renamed.GetCollator();
+               }
+               
+               /// <summary>Prints a user-readable version of this query. </summary>
+               public override System.String ToString(System.String field)
+               {
+                       return delegate_Renamed.ToString(field);
+               }
+               
+               /// <summary>Returns true iff <code>o</code> is equal to this. </summary>
+               public  override bool Equals(System.Object o)
+               {
+                       if (this == o)
+                               return true;
+                       if (!(o is RangeQuery))
+                               return false;
+                       
+                       RangeQuery other = (RangeQuery) o;
+                       return this.delegate_Renamed.Equals(other.delegate_Renamed);
+               }
+               
+               /// <summary>Returns a hash code value for this object.</summary>
+               public override int GetHashCode()
+               {
+                       return delegate_Renamed.GetHashCode();
+               }
+       }
+}
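
A sketch of the deprecated class in use (Term is imported above; the field and bounds are made up):

    RangeQuery q = new RangeQuery(new Term("author", "a"), new Term("author", "m"), true);
    // q delegates to a TermRangeQuery using SCORING_BOOLEAN_QUERY_REWRITE, so
    // Rewrite() expands it into a BooleanQuery over the matching terms
    bool inclusive = q.IsInclusive();  // true: both ends included
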
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ReqExclScorer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ReqExclScorer.cs
new file mode 100644 (file)
index 0000000..992922c
--- /dev/null
@@ -0,0 +1,179 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       
+       /// <summary>A Scorer for queries with a required subscorer
+       /// and an excluding (prohibited) sub DocIdSetIterator.
+       /// <br/>
+       /// This <code>Scorer</code> implements {@link Scorer#SkipTo(int)},
+       /// and it uses the skipTo() on the given scorers.
+       /// </summary>
+       class ReqExclScorer:Scorer
+       {
+               private Scorer reqScorer;
+               private DocIdSetIterator exclDisi;
+               private int doc = - 1;
+               
+               /// <summary>Construct a <code>ReqExclScorer</code>.</summary>
+               /// <param name="reqScorer">The scorer that must match.
+               /// </param>
+               /// <param name="exclDisi">Iterator over the documents to be excluded.
+               /// </param>
+               public ReqExclScorer(Scorer reqScorer, DocIdSetIterator exclDisi):base(null)
+               { // No similarity used.
+                       this.reqScorer = reqScorer;
+                       this.exclDisi = exclDisi;
+               }
+               
+               /// <deprecated> use {@link #NextDoc()} instead. 
+               /// </deprecated>
+        [Obsolete("use NextDoc() instead. ")]
+               public override bool Next()
+               {
+                       return NextDoc() != NO_MORE_DOCS;
+               }
+               
+               public override int NextDoc()
+               {
+                       if (reqScorer == null)
+                       {
+                               return doc;
+                       }
+                       doc = reqScorer.NextDoc();
+                       if (doc == NO_MORE_DOCS)
+                       {
+                               reqScorer = null; // exhausted, nothing left
+                               return doc;
+                       }
+                       if (exclDisi == null)
+                       {
+                               return doc;
+                       }
+                       return doc = ToNonExcluded();
+               }
+               
+               /// <summary>Advance to a non-excluded doc.
+               /// <br/>On entry:
+               /// <ul>
+               /// <li>reqScorer != null, </li>
+               /// <li>exclDisi != null, </li>
+               /// <li>reqScorer was advanced once via next() or skipTo()
+               /// and reqScorer.doc() may still be excluded.</li>
+               /// </ul>
+               /// Advances reqScorer to a non-excluded required doc, if any.
+               /// </summary>
+               /// <returns> the doc id of the first non-excluded required doc,
+               /// or NO_MORE_DOCS if there is none.
+               /// </returns>
+               private int ToNonExcluded()
+               {
+                       int exclDoc = exclDisi.DocID();
+                       int reqDoc = reqScorer.DocID(); // may be excluded
+                       do 
+                       {
+                               if (reqDoc < exclDoc)
+                               {
+                                       return reqDoc; // reqScorer advanced to before exclScorer, i.e. not excluded
+                               }
+                               else if (reqDoc > exclDoc)
+                               {
+                                       exclDoc = exclDisi.Advance(reqDoc);
+                                       if (exclDoc == NO_MORE_DOCS)
+                                       {
+                                               exclDisi = null; // exhausted, no more exclusions
+                                               return reqDoc;
+                                       }
+                                       if (exclDoc > reqDoc)
+                                       {
+                                               return reqDoc; // not excluded
+                                       }
+                               }
+                       }
+                       while ((reqDoc = reqScorer.NextDoc()) != NO_MORE_DOCS);
+                       reqScorer = null; // exhausted, nothing left
+                       return NO_MORE_DOCS;
+               }
+               
+               /// <deprecated> use {@link #DocID()} instead. 
+               /// </deprecated>
+        [Obsolete("use DocID() instead.")]
+               public override int Doc()
+               {
+                       return reqScorer.Doc(); // reqScorer may be null once next() or skipTo() has returned false
+               }
+               
+               public override int DocID()
+               {
+                       return doc;
+               }
+               
+               /// <summary>Returns the score of the current document matching the query.
+               /// Initially invalid, until {@link #Next()} is called the first time.
+               /// </summary>
+               /// <returns> The score of the required scorer.
+               /// </returns>
+               public override float Score()
+               {
+                       return reqScorer.Score(); // reqScorer may be null once next() or skipTo() has returned false
+               }
+               
+               /// <deprecated> use {@link #Advance(int)} instead. 
+               /// </deprecated>
+        [Obsolete("use Advance(int) instead.")]
+               public override bool SkipTo(int target)
+               {
+                       return Advance(target) != NO_MORE_DOCS;
+               }
+               
+               public override int Advance(int target)
+               {
+                       if (reqScorer == null)
+                       {
+                               return doc = NO_MORE_DOCS;
+                       }
+                       if (exclDisi == null)
+                       {
+                               return doc = reqScorer.Advance(target);
+                       }
+                       if (reqScorer.Advance(target) == NO_MORE_DOCS)
+                       {
+                               reqScorer = null;
+                               return doc = NO_MORE_DOCS;
+                       }
+                       return doc = ToNonExcluded();
+               }
+               
+               public override Explanation Explain(int doc)
+               {
+                       Explanation res = new Explanation();
+                       if (exclDisi.Advance(doc) == doc)
+                       {
+                               res.SetDescription("excluded");
+                       }
+                       else
+                       {
+                               res.SetDescription("not excluded");
+                               res.AddDetail(reqScorer.Explain(doc));
+                       }
+                       return res;
+               }
+       }
+}
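
ToNonExcluded() above is a two-pointer walk over increasing doc ids; the same idea on plain arrays, as a standalone sketch (names made up):

    // yields required docs absent from the sorted exclusion list
    static System.Collections.Generic.IEnumerable<int> NonExcluded(int[] req, int[] excl)
    {
        int e = 0;
        foreach (int doc in req)
        {
            while (e < excl.Length && excl[e] < doc) e++;  // catch the exclusions up
            if (e == excl.Length || excl[e] != doc) yield return doc;
        }
    }
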
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ReqOptSumScorer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ReqOptSumScorer.cs
new file mode 100644 (file)
index 0000000..8ba1b81
--- /dev/null
@@ -0,0 +1,124 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>A Scorer for queries with a required part and an optional part.
+       /// Delays skipTo() on the optional part until a score() is needed.
+       /// <br/>
+       /// This <code>Scorer</code> implements {@link Scorer#SkipTo(int)}.
+       /// </summary>
+       class ReqOptSumScorer:Scorer
+       {
+               /// <summary>The scorers passed from the constructor.
+               /// These are set to null as soon as their next() or skipTo() returns false.
+               /// </summary>
+               private Scorer reqScorer;
+               private Scorer optScorer;
+               
+               /// <summary>Construct a <code>ReqOptSumScorer</code>.</summary>
+               /// <param name="reqScorer">The required scorer. This must match.
+               /// </param>
+               /// <param name="optScorer">The optional scorer. This is used for scoring only.
+               /// </param>
+               public ReqOptSumScorer(Scorer reqScorer, Scorer optScorer):base(null)
+               { // No similarity used.
+                       this.reqScorer = reqScorer;
+                       this.optScorer = optScorer;
+               }
+               
+               /// <deprecated> use {@link #NextDoc()} instead. 
+               /// </deprecated>
+        [Obsolete("use NextDoc() instead.")]
+               public override bool Next()
+               {
+                       return reqScorer.Next();
+               }
+               
+               public override int NextDoc()
+               {
+                       return reqScorer.NextDoc();
+               }
+               
+               /// <deprecated> use {@link #Advance(int)} instead. 
+               /// </deprecated>
+        [Obsolete("use Advance(int) instead.")]
+               public override bool SkipTo(int target)
+               {
+                       return reqScorer.SkipTo(target);
+               }
+               
+               public override int Advance(int target)
+               {
+                       return reqScorer.Advance(target);
+               }
+               
+               /// <deprecated> use {@link #DocID()} instead. 
+               /// </deprecated>
+        [Obsolete("use DocID() instead.")]
+               public override int Doc()
+               {
+                       return reqScorer.Doc();
+               }
+               
+               public override int DocID()
+               {
+                       return reqScorer.DocID();
+               }
+               
+               /// <summary>Returns the score of the current document matching the query.
+               /// Initially invalid, until {@link #Next()} is called the first time.
+               /// </summary>
+               /// <returns> The score of the required scorer, increased by the score
+               /// of the optional scorer when it also matches the current document.
+               /// </returns>
+               public override float Score()
+               {
+                       int curDoc = reqScorer.DocID();
+                       float reqScore = reqScorer.Score();
+                       if (optScorer == null)
+                       {
+                               return reqScore;
+                       }
+                       
+                       int optScorerDoc = optScorer.DocID();
+                       if (optScorerDoc < curDoc && (optScorerDoc = optScorer.Advance(curDoc)) == NO_MORE_DOCS)
+                       {
+                               optScorer = null;
+                               return reqScore;
+                       }
+                       
+                       return optScorerDoc == curDoc?reqScore + optScorer.Score():reqScore;
+               }
+               
+               /// <summary>Explain the score of a document.
+               /// TODO: Also show the total score.
+               /// See BooleanScorer.explain() on how to do this.
+               /// </summary>
+               public override Explanation Explain(int doc)
+               {
+                       Explanation res = new Explanation();
+                       res.SetDescription("required, optional");
+                       res.AddDetail(reqScorer.Explain(doc));
+                       res.AddDetail(optScorer.Explain(doc));
+                       return res;
+               }
+       }
+}
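
A worked case for the Score() rule above, with made-up numbers:

    // required scorer on doc 42 scoring 0.8; optional scorer also reaches doc 42 at 0.3
    float withOpt    = 0.8f + 0.3f;  // 1.1f: the optional scorer matched the current doc
    float withoutOpt = 0.8f;         // optional scorer overshot doc 42, or was exhausted
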
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ScoreCachingWrappingScorer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ScoreCachingWrappingScorer.cs
new file mode 100644 (file)
index 0000000..92640df
--- /dev/null
@@ -0,0 +1,117 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A {@link Scorer} which wraps another scorer and caches the score of the
+       /// current document. Successive calls to {@link #Score()} will return the same
+       /// result and will not invoke the wrapped Scorer's score() method, unless the
+       /// current document has changed.<br/>
+       /// This class might be useful due to the changes done to the {@link Collector}
+       /// interface, in which the score is not computed for a document by default, only
+       /// if the collector requests it. Some collectors may need to use the score in
+       /// several places, however all they have in hand is a {@link Scorer} object, and
+       /// might end up computing the score of a document more than once.
+       /// </summary>
+       public class ScoreCachingWrappingScorer:Scorer
+       {
+               
+               private Scorer scorer;
+		private int curDoc = -1;
+               private float curScore;
+               
+               /// <summary>Creates a new instance by wrapping the given scorer. </summary>
+               public ScoreCachingWrappingScorer(Scorer scorer):base(scorer.GetSimilarity())
+               {
+                       this.scorer = scorer;
+               }
+               
+               public /*protected internal*/ override bool Score(Collector collector, int max, int firstDocID)
+               {
+                       return scorer.Score(collector, max, firstDocID);
+               }
+               
+               public override Similarity GetSimilarity()
+               {
+                       return scorer.GetSimilarity();
+               }
+               
+               public override Explanation Explain(int doc)
+               {
+                       return scorer.Explain(doc);
+               }
+               
+               public override float Score()
+               {
+                       int doc = scorer.DocID();
+                       if (doc != curDoc)
+                       {
+                               curScore = scorer.Score();
+                               curDoc = doc;
+                       }
+                       
+                       return curScore;
+               }
+               
+               /// <deprecated> use {@link #DocID()} instead. 
+               /// </deprecated>
+        [Obsolete("use DocID() instead.")]
+               public override int Doc()
+               {
+                       return scorer.Doc();
+               }
+               
+               public override int DocID()
+               {
+                       return scorer.DocID();
+               }
+               
+               /// <deprecated> use {@link #NextDoc()} instead. 
+               /// </deprecated>
+        [Obsolete("use NextDoc() instead.")]
+               public override bool Next()
+               {
+                       return scorer.Next();
+               }
+               
+               public override int NextDoc()
+               {
+                       return scorer.NextDoc();
+               }
+               
+               public override void  Score(Collector collector)
+               {
+                       scorer.Score(collector);
+               }
+               
+               /// <deprecated> use {@link #Advance(int)} instead. 
+               /// </deprecated>
+        [Obsolete("use Advance(int) instead.")]
+               public override bool SkipTo(int target)
+               {
+                       return scorer.SkipTo(target);
+               }
+               
+               public override int Advance(int target)
+               {
+                       return scorer.Advance(target);
+               }
+       }
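+
+	// Editor's sketch (illustrative only, not part of the upstream sources): a
+	// Collector that reads the score twice per document; wrapping the incoming
+	// scorer in ScoreCachingWrappingScorer means the underlying scorer computes
+	// it only once. Assumes the Collector contract used elsewhere in this port
+	// (SetScorer/Collect/SetNextReader/AcceptsDocsOutOfOrder).
+	class CachedScoreExampleCollector : Collector
+	{
+		private Scorer scorer;
+		public override void SetScorer(Scorer scorer)
+		{
+			this.scorer = new ScoreCachingWrappingScorer(scorer);
+		}
+		public override void Collect(int doc)
+		{
+			float s1 = scorer.Score(); // computed by the wrapped scorer
+			float s2 = scorer.Score(); // same document: served from the cache
+			System.Diagnostics.Debug.Assert(s1 == s2);
+		}
+		public override void SetNextReader(Mono.Lucene.Net.Index.IndexReader reader, int docBase)
+		{
+		}
+		public override bool AcceptsDocsOutOfOrder()
+		{
+			return true;
+		}
+	}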
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ScoreDoc.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ScoreDoc.cs
new file mode 100644 (file)
index 0000000..fa1a840
--- /dev/null
@@ -0,0 +1,50 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Expert: Returned by low-level search implementations.</summary>
+       /// <seealso cref="TopDocs">
+       /// </seealso>
+       [Serializable]
+       public class ScoreDoc
+       {
+               /// <summary>Expert: The score of this document for the query. </summary>
+               public float score;
+               
+               /// <summary>Expert: A hit document's number.</summary>
+               /// <seealso cref="Searcher.Doc(int)">
+               /// </seealso>
+               public int doc;
+               
+               /// <summary>Expert: Constructs a ScoreDoc. </summary>
+               public ScoreDoc(int doc, float score)
+               {
+                       this.doc = doc;
+                       this.score = score;
+               }
+               
+		/// <summary>A convenience method for debugging. </summary>
+		public override System.String ToString()
+               {
+                       return "doc=" + doc + " score=" + score;
+               }
+       }
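+
+	// Editor's sketch (illustrative only, not part of the upstream sources):
+	// ScoreDoc is just a (doc, score) pair, so hits can be re-sorted without any
+	// further Lucene machinery.
+	static class ScoreDocSortExample
+	{
+		public static void SortByDescendingScore(ScoreDoc[] hits)
+		{
+			System.Array.Sort(hits, (a, b) => b.score.CompareTo(a.score));
+		}
+	}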
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ScoreDocComparator.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/ScoreDocComparator.cs
new file mode 100644 (file)
index 0000000..d5bad90
--- /dev/null
@@ -0,0 +1,129 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Expert: Compares two ScoreDoc objects for sorting.
+       /// 
+       /// <p/>Created: Feb 3, 2004 9:00:16 AM 
+       /// 
+       /// </summary>
+       /// <since>   lucene 1.4
+       /// </since>
+       /// <version>  $Id: ScoreDocComparator.java 738219 2009-01-27 20:15:21Z mikemccand $
+       /// </version>
+       /// <deprecated> use {@link FieldComparator}
+       /// </deprecated>
+    [Obsolete("use FieldComparator")]
+	public struct ScoreDocComparator_Fields
+	{
+               /// <summary>Special comparator for sorting hits according to computed relevance (document score). </summary>
+               public readonly static ScoreDocComparator RELEVANCE;
+               /// <summary>Special comparator for sorting hits according to index order (document number). </summary>
+               public readonly static ScoreDocComparator INDEXORDER;
+        static ScoreDocComparator_Fields()
+               {
+                       RELEVANCE = new AnonymousClassScoreDocComparator();
+                       INDEXORDER = new AnonymousClassScoreDocComparator1();
+               }
+       }
+       class AnonymousClassScoreDocComparator : ScoreDocComparator
+       {
+               public virtual int Compare(ScoreDoc i, ScoreDoc j)
+               {
+                       if (i.score > j.score)
+				return -1;
+                       if (i.score < j.score)
+                               return 1;
+                       return 0;
+               }
+               public virtual System.IComparable SortValue(ScoreDoc i)
+               {
+                       return (float) i.score;
+               }
+               public virtual int SortType()
+               {
+                       return SortField.SCORE;
+               }
+       }
+       class AnonymousClassScoreDocComparator1 : ScoreDocComparator
+       {
+               public virtual int Compare(ScoreDoc i, ScoreDoc j)
+               {
+                       if (i.doc < j.doc)
+				return -1;
+                       if (i.doc > j.doc)
+                               return 1;
+                       return 0;
+               }
+               public virtual System.IComparable SortValue(ScoreDoc i)
+               {
+                       return (System.Int32) i.doc;
+               }
+               public virtual int SortType()
+               {
+                       return SortField.DOC;
+               }
+       }
+       public interface ScoreDocComparator
+       {
+               
+               /// <summary> Compares two ScoreDoc objects and returns a result indicating their
+               /// sort order.
+               /// </summary>
+               /// <param name="i">First ScoreDoc
+               /// </param>
+               /// <param name="j">Second ScoreDoc
+               /// </param>
+               /// <returns> a negative integer if <code>i</code> should come before <code>j</code><br/>
+               /// a positive integer if <code>i</code> should come after <code>j</code><br/>
+               /// <code>0</code> if they are equal
+               /// </returns>
+               /// <seealso cref="java.util.Comparator">
+               /// </seealso>
+               int Compare(ScoreDoc i, ScoreDoc j);
+               
+		/// <summary> Returns the value used to sort the given document.  The
+		/// object returned must be serializable.  This is used by
+		/// multisearchers to determine how to collate results from their
+		/// searchers.
+               /// </summary>
+               /// <seealso cref="FieldDoc">
+               /// </seealso>
+               /// <param name="i">Document
+               /// </param>
+               /// <returns> Serializable object
+               /// </returns>
+               System.IComparable SortValue(ScoreDoc i);
+               
+               /// <summary> Returns the type of sort.  Should return <code>SortField.SCORE</code>,
+               /// <code>SortField.DOC</code>, <code>SortField.STRING</code>,
+               /// <code>SortField.INTEGER</code>, <code>SortField.FLOAT</code> or
+               /// <code>SortField.CUSTOM</code>.  It is not valid to return
+               /// <code>SortField.AUTO</code>.
+               /// This is used by multisearchers to determine how to collate results
+               /// from their searchers.
+               /// </summary>
+               /// <returns> One of the constants in SortField.
+               /// </returns>
+               /// <seealso cref="SortField">
+               /// </seealso>
+               int SortType();
+       }
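+
+	// Editor's sketch (illustrative only, not part of the upstream sources):
+	// using the two built-in comparators; both carry [Obsolete] warnings, like
+	// the interface itself.
+	static class ScoreDocComparatorExample
+	{
+		public static int ByRelevance(ScoreDoc a, ScoreDoc b)
+		{
+			// Returns -1 when a scores higher, so RELEVANCE sorts best hits first.
+			return ScoreDocComparator_Fields.RELEVANCE.Compare(a, b);
+		}
+		public static int ByIndexOrder(ScoreDoc a, ScoreDoc b)
+		{
+			return ScoreDocComparator_Fields.INDEXORDER.Compare(a, b);
+		}
+	}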
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Scorer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Scorer.cs
new file mode 100644 (file)
index 0000000..43e1861
--- /dev/null
@@ -0,0 +1,156 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Expert: Common scoring functionality for different types of queries.
+       /// 
+       /// <p/>
+       /// A <code>Scorer</code> iterates over documents matching a
+       /// query in increasing order of doc Id.
+	/// <p/>
+       /// Document scores are computed using a given <code>Similarity</code>
+       /// implementation.
+       /// <p/>
+       /// 
+	/// <p/><b>NOTE</b>: The values Float.NaN,
+	/// Float.NEGATIVE_INFINITY and Float.POSITIVE_INFINITY are
+	/// not valid scores.  Certain collectors (e.g. {@link
+       /// TopScoreDocCollector}) will not properly collect hits
+       /// with these scores.
+       /// 
+       /// </summary>
+       /// <seealso cref="BooleanQuery.setAllowDocsOutOfOrder">
+       /// </seealso>
+       public abstract class Scorer:DocIdSetIterator
+       {
+               private Similarity similarity;
+               
+               /// <summary>Constructs a Scorer.</summary>
+               /// <param name="similarity">The <code>Similarity</code> implementation used by this scorer.
+               /// </param>
+               protected internal Scorer(Similarity similarity)
+               {
+                       this.similarity = similarity;
+               }
+               
+               /// <summary>Returns the Similarity implementation used by this scorer. </summary>
+               public virtual Similarity GetSimilarity()
+               {
+                       return this.similarity;
+               }
+               
+               /// <summary>Scores and collects all matching documents.</summary>
+               /// <param name="hc">The collector to which all matching documents are passed through
+               /// {@link HitCollector#Collect(int, float)}.
+               /// <br/>When this method is used the {@link #Explain(int)} method should not be used.
+               /// </param>
+               /// <deprecated> use {@link #Score(Collector)} instead.
+               /// </deprecated>
+        [Obsolete("use Score(Collector) instead.")]
+               public virtual void  Score(HitCollector hc)
+               {
+                       Score(new HitCollectorWrapper(hc));
+               }
+               
+               /// <summary>Scores and collects all matching documents.</summary>
+               /// <param name="collector">The collector to which all matching documents are passed.
+               /// <br/>When this method is used the {@link #Explain(int)} method should not be used.
+               /// </param>
+               public virtual void  Score(Collector collector)
+               {
+                       collector.SetScorer(this);
+                       int doc;
+                       while ((doc = NextDoc()) != NO_MORE_DOCS)
+                       {
+                               collector.Collect(doc);
+                       }
+               }
+               
+               /// <summary>Expert: Collects matching documents in a range.  Hook for optimization.
+               /// Note that {@link #Next()} must be called once before this method is called
+               /// for the first time.
+               /// </summary>
+               /// <param name="hc">The collector to which all matching documents are passed through
+               /// {@link HitCollector#Collect(int, float)}.
+               /// </param>
+               /// <param name="max">Do not score documents past this.
+               /// </param>
+               /// <returns> true if more matching documents may remain.
+               /// </returns>
+               /// <deprecated> use {@link #Score(Collector, int, int)} instead.
+               /// </deprecated>
+        [Obsolete("use Score(Collector, int, int) instead")]
+               protected internal virtual bool Score(HitCollector hc, int max)
+               {
+                       return Score(new HitCollectorWrapper(hc), max, DocID());
+               }
+               
+               /// <summary> Expert: Collects matching documents in a range. Hook for optimization.
+		/// Note that <code>firstDocID</code> is added to ensure that {@link #NextDoc()}
+               /// was called before this method.
+               /// 
+               /// </summary>
+               /// <param name="collector">The collector to which all matching documents are passed.
+               /// </param>
+               /// <param name="max">Do not score documents past this.
+               /// </param>
+               /// <param name="firstDocID">
+		/// The first document ID (ensures {@link #NextDoc()} is called before
+		/// this method).
+               /// </param>
+               /// <returns> true if more matching documents may remain.
+               /// </returns>
+               public /*protected internal*/ virtual bool Score(Collector collector, int max, int firstDocID)
+               {
+                       collector.SetScorer(this);
+                       int doc = firstDocID;
+                       while (doc < max)
+                       {
+                               collector.Collect(doc);
+                               doc = NextDoc();
+                       }
+                       return doc != NO_MORE_DOCS;
+               }
+               
+               /// <summary>Returns the score of the current document matching the query.
+               /// Initially invalid, until {@link #Next()} or {@link #SkipTo(int)}
+               /// is called the first time, or when called from within
+               /// {@link Collector#collect}.
+               /// </summary>
+               public abstract float Score();
+               
+               /// <summary>Returns an explanation of the score for a document.
+               /// <br/>When this method is used, the {@link #Next()}, {@link #SkipTo(int)} and
+               /// {@link #Score(HitCollector)} methods should not be used.
+               /// </summary>
+               /// <param name="doc">The document number for the explanation.
+               /// 
+               /// </param>
+               /// <deprecated> Please use {@link IndexSearcher#explain}
+               /// or {@link Weight#explain} instead.
+               /// </deprecated>
+               public virtual Explanation Explain(int doc)
+               {
+                       throw new System.NotSupportedException();
+               }
+       }
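+
+	// Editor's sketch (illustrative only, not part of the upstream sources): the
+	// basic consumption protocol for any Scorer, mirroring the default
+	// Score(Collector) implementation above.
+	static class ScorerIterationExample
+	{
+		public static float SumScores(Scorer scorer)
+		{
+			float total = 0.0f;
+			// NextDoc() positions the scorer; Score() is only valid while it is
+			// positioned on a match, never after NO_MORE_DOCS.
+			while (scorer.NextDoc() != DocIdSetIterator.NO_MORE_DOCS)
+			{
+				total += scorer.Score();
+			}
+			return total;
+		}
+	}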
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Searchable.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Searchable.cs
new file mode 100644 (file)
index 0000000..d8e4218
--- /dev/null
@@ -0,0 +1,198 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Document = Mono.Lucene.Net.Documents.Document;
+using FieldSelector = Mono.Lucene.Net.Documents.FieldSelector;
+using CorruptIndexException = Mono.Lucene.Net.Index.CorruptIndexException;
+using Term = Mono.Lucene.Net.Index.Term;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> The interface for search implementations.
+       /// 
+       /// <p/>
+       /// Searchable is the abstract network protocol for searching. Implementations
+       /// provide search over a single index, over multiple indices, and over indices
+       /// on remote servers.
+       /// 
+       /// <p/>
+       /// Queries, filters and sort criteria are designed to be compact so that they
+       /// may be efficiently passed to a remote index, with only the top-scoring hits
+       /// being returned, rather than every matching hit.
+       /// 
+       /// <b>NOTE:</b> this interface is kept public for convenience. Since it is not
+       /// expected to be implemented directly, it may be changed unexpectedly between
+       /// releases.
+       /// </summary>
+       public interface Searchable
+       {
+               
+               /// <summary>Lower-level search API.
+               /// 
+               /// <p/>{@link HitCollector#Collect(int,float)} is called for every non-zero
+               /// scoring document.
+               /// <br/>HitCollector-based access to remote indexes is discouraged.
+               /// 
+               /// <p/>Applications should only use this if they need <i>all</i> of the
+               /// matching documents.  The high-level search API ({@link
+               /// Searcher#Search(Query)}) is usually more efficient, as it skips
+               /// non-high-scoring hits.
+               /// 
+               /// </summary>
+               /// <param name="weight">to match documents
+               /// </param>
+               /// <param name="filter">if non-null, used to permit documents to be collected.
+               /// </param>
+               /// <param name="results">to receive hits
+               /// </param>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               /// <deprecated> use {@link #Search(Weight, Filter, Collector)} instead.
+               /// </deprecated>
+        [Obsolete("use Search(Weight, Filter, Collector) instead.")]
+               void  Search(Weight weight, Filter filter, HitCollector results);
+               
+               /// <summary> Lower-level search API.
+               /// 
+               /// <p/>
+               /// {@link Collector#Collect(int)} is called for every document. <br/>
+               /// Collector-based access to remote indexes is discouraged.
+               /// 
+               /// <p/>
+               /// Applications should only use this if they need <i>all</i> of the matching
+               /// documents. The high-level search API ({@link Searcher#Search(Query)}) is
+               /// usually more efficient, as it skips non-high-scoring hits.
+               /// 
+               /// </summary>
+               /// <param name="weight">to match documents
+               /// </param>
+               /// <param name="filter">if non-null, used to permit documents to be collected.
+               /// </param>
+               /// <param name="collector">to receive hits
+               /// </param>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               void  Search(Weight weight, Filter filter, Collector collector);
+               
+               /// <summary>Frees resources associated with this Searcher.
+               /// Be careful not to call this method while you are still using objects
+               /// like {@link Hits}.
+               /// </summary>
+               void  Close();
+               
+               /// <summary>Expert: Returns the number of documents containing <code>term</code>.
+               /// Called by search code to compute term weights.
+               /// </summary>
+               /// <seealso cref="Mono.Lucene.Net.Index.IndexReader.DocFreq(Term)">
+               /// </seealso>
+               int DocFreq(Term term);
+               
+               /// <summary>Expert: For each term in the terms array, calculates the number of
+               /// documents containing <code>term</code>. Returns an array with these
+               /// document frequencies. Used to minimize number of remote calls.
+               /// </summary>
+               int[] DocFreqs(Term[] terms);
+               
+               /// <summary>Expert: Returns one greater than the largest possible document number.
+               /// Called by search code to compute term weights.
+               /// </summary>
+               /// <seealso cref="Mono.Lucene.Net.Index.IndexReader.MaxDoc()">
+               /// </seealso>
+               int MaxDoc();
+               
+               /// <summary>Expert: Low-level search implementation.  Finds the top <code>n</code>
+               /// hits for <code>query</code>, applying <code>filter</code> if non-null.
+               /// 
+               /// <p/>Called by {@link Hits}.
+               /// 
+               /// <p/>Applications should usually call {@link Searcher#Search(Query)} or
+               /// {@link Searcher#Search(Query,Filter)} instead.
+               /// </summary>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               TopDocs Search(Weight weight, Filter filter, int n);
+               
+               /// <summary>Expert: Returns the stored fields of document <code>i</code>.
+               /// Called by {@link HitCollector} implementations.
+               /// </summary>
+               /// <seealso cref="Mono.Lucene.Net.Index.IndexReader.Document(int)">
+               /// </seealso>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               Document Doc(int i);
+               
+               /// <summary> Get the {@link Mono.Lucene.Net.Documents.Document} at the <code>n</code><sup>th</sup> position. The {@link Mono.Lucene.Net.Documents.FieldSelector}
+               /// may be used to determine what {@link Mono.Lucene.Net.Documents.Field}s to load and how they should be loaded.
+               /// 
+		/// <b>NOTE:</b> If the underlying Reader (more specifically, the underlying <code>FieldsReader</code>) is closed before the lazy {@link Mono.Lucene.Net.Documents.Field} is
+		/// loaded, an exception may be thrown.  If you want the value of a lazy {@link Mono.Lucene.Net.Documents.Field} to be available after closing, you must
+		/// explicitly load it or fetch the Document again with a new loader.
+               /// 
+               /// 
+               /// </summary>
+               /// <param name="n">Get the document at the <code>n</code><sup>th</sup> position
+               /// </param>
+               /// <param name="fieldSelector">The {@link Mono.Lucene.Net.Documents.FieldSelector} to use to determine what Fields should be loaded on the Document.  May be null, in which case all Fields will be loaded.
+               /// </param>
+               /// <returns> The stored fields of the {@link Mono.Lucene.Net.Documents.Document} at the nth position
+               /// </returns>
+               /// <throws>  CorruptIndexException if the index is corrupt </throws>
+               /// <throws>  IOException if there is a low-level IO error </throws>
+               /// <seealso cref="Mono.Lucene.Net.Index.IndexReader.Document(int, FieldSelector)">
+               /// </seealso>
+               /// <seealso cref="Mono.Lucene.Net.Documents.Fieldable">
+               /// </seealso>
+               /// <seealso cref="Mono.Lucene.Net.Documents.FieldSelector">
+               /// </seealso>
+               /// <seealso cref="Mono.Lucene.Net.Documents.SetBasedFieldSelector">
+               /// </seealso>
+               /// <seealso cref="Mono.Lucene.Net.Documents.LoadFirstFieldSelector">
+               /// </seealso>
+               Document Doc(int n, FieldSelector fieldSelector);
+               
+               /// <summary>Expert: called to re-write queries into primitive queries.</summary>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               Query Rewrite(Query query);
+               
+		/// <summary>Expert: low-level implementation method.
+		/// Returns an Explanation that describes how <code>doc</code> scored against
+               /// <code>weight</code>.
+               /// 
+               /// <p/>This is intended to be used in developing Similarity implementations,
+               /// and, for good performance, should not be displayed with every hit.
+               /// Computing an explanation is as expensive as executing the query over the
+               /// entire index.
+               /// <p/>Applications should call {@link Searcher#Explain(Query, int)}.
+               /// </summary>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               Explanation Explain(Weight weight, int doc);
+               
+               /// <summary>Expert: Low-level search implementation with arbitrary sorting.  Finds
+               /// the top <code>n</code> hits for <code>query</code>, applying
+               /// <code>filter</code> if non-null, and sorting the hits by the criteria in
+               /// <code>sort</code>.
+               /// 
+               /// <p/>Applications should usually call
+               /// {@link Searcher#Search(Query,Filter,int,Sort)} instead.
+               /// 
+               /// </summary>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               TopFieldDocs Search(Weight weight, Filter filter, int n, Sort sort);
+       }
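+
+	// Editor's sketch (illustrative only, not part of the upstream sources):
+	// DocFreqs() batches what would otherwise be one DocFreq() call per term;
+	// against a remote Searchable that is one round-trip instead of
+	// terms.Length round-trips.
+	static class SearchableDocFreqExample
+	{
+		public static int[] FrequenciesOf(Searchable searchable, Term[] terms)
+		{
+			return searchable.DocFreqs(terms);
+		}
+	}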
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Searcher.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Searcher.cs
new file mode 100644 (file)
index 0000000..54adf27
--- /dev/null
@@ -0,0 +1,297 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Document = Mono.Lucene.Net.Documents.Document;
+using CorruptIndexException = Mono.Lucene.Net.Index.CorruptIndexException;
+using Term = Mono.Lucene.Net.Index.Term;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> An abstract base class for search implementations. Implements the main search
+       /// methods.
+       /// 
+       /// <p/>
+	/// Note that you can only access hits from a Searcher as long as it is not yet
+	/// closed; otherwise an IOException will be thrown.
+       /// </summary>
+       public abstract class Searcher : System.MarshalByRefObject, Searchable, System.IDisposable
+       {
+               public Searcher()
+               {
+                       InitBlock();
+               }
+               private void  InitBlock()
+               {
+                       similarity = Similarity.GetDefault();
+               }
+               
+               /// <summary>Returns the documents matching <code>query</code>. </summary>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               /// <deprecated> Hits will be removed in Lucene 3.0. Use
+               /// {@link #Search(Query, Filter, int)} instead.
+               /// </deprecated>
+        [Obsolete("Hits will be removed in Lucene 3.0. Use Search(Query, Filter, int) instead")]
+               public Hits Search(Query query)
+               {
+                       return Search(query, (Filter) null);
+               }
+               
+               /// <summary>Returns the documents matching <code>query</code> and
+               /// <code>filter</code>.
+               /// </summary>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               /// <deprecated> Hits will be removed in Lucene 3.0. Use
+               /// {@link #Search(Query, Filter, int)} instead.
+               /// </deprecated>
+        [Obsolete("Hits will be removed in Lucene 3.0. Use Search(Query, Filter, int) instead")]
+               public virtual Hits Search(Query query, Filter filter)
+               {
+                       return new Hits(this, query, filter);
+               }
+               
+               /// <summary>Returns documents matching <code>query</code> sorted by
+               /// <code>sort</code>.
+               /// </summary>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               /// <deprecated> Hits will be removed in Lucene 3.0. Use 
+               /// {@link #Search(Query, Filter, int, Sort)} instead.
+               /// </deprecated>
+        [Obsolete("Hits will be removed in Lucene 3.0. Use Search(Query, Filter, int, Sort) instead")]
+               public virtual Hits Search(Query query, Sort sort)
+               {
+                       return new Hits(this, query, null, sort);
+               }
+               
+               /// <summary>Returns documents matching <code>query</code> and <code>filter</code>,
+               /// sorted by <code>sort</code>.
+               /// </summary>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               /// <deprecated> Hits will be removed in Lucene 3.0. Use 
+               /// {@link #Search(Query, Filter, int, Sort)} instead.
+               /// </deprecated>
+        [Obsolete("Hits will be removed in Lucene 3.0. Use Search(Query, Filter, int, Sort) instead")]
+               public virtual Hits Search(Query query, Filter filter, Sort sort)
+               {
+                       return new Hits(this, query, filter, sort);
+               }
+               
+               /// <summary>Search implementation with arbitrary sorting.  Finds
+               /// the top <code>n</code> hits for <code>query</code>, applying
+               /// <code>filter</code> if non-null, and sorting the hits by the criteria in
+               /// <code>sort</code>.
+               /// 
+               /// <p/>NOTE: this does not compute scores by default; use
+               /// {@link IndexSearcher#setDefaultFieldSortScoring} to enable scoring.
+               /// 
+               /// </summary>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               public virtual TopFieldDocs Search(Query query, Filter filter, int n, Sort sort)
+               {
+                       return Search(CreateWeight(query), filter, n, sort);
+               }
+               
+               /// <summary>Lower-level search API.
+               /// 
+               /// <p/>{@link HitCollector#Collect(int,float)} is called for every matching
+               /// document.
+               /// 
+               /// <p/>Applications should only use this if they need <i>all</i> of the
+               /// matching documents.  The high-level search API ({@link
+               /// Searcher#Search(Query)}) is usually more efficient, as it skips
+               /// non-high-scoring hits.
+               /// <p/>Note: The <code>score</code> passed to this method is a raw score.
+               /// In other words, the score will not necessarily be a float whose value is
+               /// between 0 and 1.
+               /// </summary>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               /// <deprecated> use {@link #Search(Query, Collector)} instead.
+               /// </deprecated>
+        [Obsolete("use Search(Query, Collector) instead.")]
+               public virtual void  Search(Query query, HitCollector results)
+               {
+                       Search(CreateWeight(query), null, new HitCollectorWrapper(results));
+               }
+               
+               /// <summary>Lower-level search API.
+               /// 
+               /// <p/>{@link Collector#Collect(int)} is called for every matching document.
+               /// 
+               /// <p/>Applications should only use this if they need <i>all</i> of the matching
+		/// documents. The high-level search API ({@link Searcher#Search(Query, int)})
+		/// is usually more efficient, as it skips non-high-scoring hits.
+               /// <p/>Note: The <code>score</code> passed to this method is a raw score.
+               /// In other words, the score will not necessarily be a float whose value is
+               /// between 0 and 1.
+               /// </summary>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               public virtual void  Search(Query query, Collector results)
+               {
+                       Search(CreateWeight(query), null, results);
+               }
+               
+               /// <summary>Lower-level search API.
+               /// 
+               /// <p/>{@link HitCollector#Collect(int,float)} is called for every matching
+               /// document.
+               /// <br/>HitCollector-based access to remote indexes is discouraged.
+               /// 
+               /// <p/>Applications should only use this if they need <i>all</i> of the
+               /// matching documents.  The high-level search API ({@link
+               /// Searcher#Search(Query, Filter, int)}) is usually more efficient, as it skips
+               /// non-high-scoring hits.
+               /// 
+               /// </summary>
+               /// <param name="query">to match documents
+               /// </param>
+               /// <param name="filter">if non-null, used to permit documents to be collected.
+               /// </param>
+               /// <param name="results">to receive hits
+               /// </param>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               /// <deprecated> use {@link #Search(Query, Filter, Collector)} instead.
+               /// </deprecated>
+        [Obsolete("use Search(Query, Filter, Collector) instead.")]
+               public virtual void  Search(Query query, Filter filter, HitCollector results)
+               {
+                       Search(CreateWeight(query), filter, new HitCollectorWrapper(results));
+               }
+               
+               /// <summary>Lower-level search API.
+               /// 
+               /// <p/>{@link Collector#Collect(int)} is called for every matching
+               /// document.
+               /// <br/>Collector-based access to remote indexes is discouraged.
+               /// 
+               /// <p/>Applications should only use this if they need <i>all</i> of the
+               /// matching documents.  The high-level search API ({@link
+               /// Searcher#Search(Query, Filter, int)}) is usually more efficient, as it skips
+               /// non-high-scoring hits.
+               /// 
+               /// </summary>
+               /// <param name="query">to match documents
+               /// </param>
+               /// <param name="filter">if non-null, used to permit documents to be collected.
+               /// </param>
+               /// <param name="results">to receive hits
+               /// </param>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               public virtual void  Search(Query query, Filter filter, Collector results)
+               {
+                       Search(CreateWeight(query), filter, results);
+               }
+               
+               /// <summary>Finds the top <code>n</code>
+               /// hits for <code>query</code>, applying <code>filter</code> if non-null.
+               /// 
+               /// </summary>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               public virtual TopDocs Search(Query query, Filter filter, int n)
+               {
+                       return Search(CreateWeight(query), filter, n);
+               }
+               
+               /// <summary>Finds the top <code>n</code>
+               /// hits for <code>query</code>.
+               /// 
+               /// </summary>
+               /// <throws>  BooleanQuery.TooManyClauses </throws>
+               public virtual TopDocs Search(Query query, int n)
+               {
+                       return Search(query, null, n);
+               }
+               
+               /// <summary>Returns an Explanation that describes how <code>doc</code> scored against
+               /// <code>query</code>.
+               /// 
+               /// <p/>This is intended to be used in developing Similarity implementations,
+               /// and, for good performance, should not be displayed with every hit.
+               /// Computing an explanation is as expensive as executing the query over the
+               /// entire index.
+               /// </summary>
+               public virtual Explanation Explain(Query query, int doc)
+               {
+                       return Explain(CreateWeight(query), doc);
+               }
+               
+               /// <summary>The Similarity implementation used by this searcher. </summary>
+               private Similarity similarity;
+               
+               /// <summary>Expert: Set the Similarity implementation used by this Searcher.
+               /// 
+               /// </summary>
+               /// <seealso cref="Similarity.SetDefault(Similarity)">
+               /// </seealso>
+               public virtual void  SetSimilarity(Similarity similarity)
+               {
+                       this.similarity = similarity;
+               }
+               
+               /// <summary>Expert: Return the Similarity implementation used by this Searcher.
+               /// 
+               /// <p/>This defaults to the current value of {@link Similarity#GetDefault()}.
+               /// </summary>
+               public virtual Similarity GetSimilarity()
+               {
+                       return this.similarity;
+               }
+               
+		/// <summary> Creates a weight for <code>query</code>. </summary>
+               /// <returns> new weight
+               /// </returns>
+               public /*protected internal*/ virtual Weight CreateWeight(Query query)
+               {
+                       return query.Weight(this);
+               }
+               
+		// Documentation inherited from the Searchable interface.
+               public virtual int[] DocFreqs(Term[] terms)
+               {
+                       int[] result = new int[terms.Length];
+                       for (int i = 0; i < terms.Length; i++)
+                       {
+                               result[i] = DocFreq(terms[i]);
+                       }
+                       return result;
+               }
+               
+               /* The following abstract methods were added as a workaround for GCJ bug #15411.
+               * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=15411
+               */
+               /// <deprecated> use {@link #Search(Weight, Filter, Collector)} instead.
+               /// </deprecated>
+        [Obsolete("use Search(Weight, Filter, Collector) instead.")]
+               public virtual void  Search(Weight weight, Filter filter, HitCollector results)
+               {
+                       Search(weight, filter, new HitCollectorWrapper(results));
+               }
+               abstract public void  Search(Weight weight, Filter filter, Collector results);
+               abstract public void  Close();
+        abstract public void Dispose();
+               abstract public int DocFreq(Term term);
+               abstract public int MaxDoc();
+               abstract public TopDocs Search(Weight weight, Filter filter, int n);
+               abstract public Document Doc(int i);
+               abstract public Query Rewrite(Query query);
+               abstract public Explanation Explain(Weight weight, int doc);
+               abstract public TopFieldDocs Search(Weight weight, Filter filter, int n, Sort sort);
+               /* End patch for GCJ bug #15411. */
+               public abstract Mono.Lucene.Net.Documents.Document Doc(int param1, Mono.Lucene.Net.Documents.FieldSelector param2);
+       }
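+
+	// Editor's sketch (illustrative only, not part of the upstream sources): the
+	// non-deprecated entry points; the Hits-returning overloads above disappear
+	// in Lucene 3.0.
+	static class SearcherUsageExample
+	{
+		public static TopDocs TopHits(Searcher searcher, Query query, int n)
+		{
+			return searcher.Search(query, n);
+		}
+		public static Explanation WhyDidItMatch(Searcher searcher, Query query, int doc)
+		{
+			// As documented above, this is as expensive as running the query itself.
+			return searcher.Explain(query, doc);
+		}
+	}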
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Similarity.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Similarity.cs
new file mode 100644 (file)
index 0000000..e738877
--- /dev/null
@@ -0,0 +1,952 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using FieldInvertState = Mono.Lucene.Net.Index.FieldInvertState;
+using Term = Mono.Lucene.Net.Index.Term;
+using SmallFloat = Mono.Lucene.Net.Util.SmallFloat;
+using IDFExplanation = Mono.Lucene.Net.Search.Explanation.IDFExplanation;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Expert: Scoring API.
+       /// <p/>Subclasses implement search scoring.
+       /// 
+       /// <p/>The score of query <code>q</code> for document <code>d</code> correlates to the
+       /// cosine-distance or dot-product between document and query vectors in a
+       /// <a href="http://en.wikipedia.org/wiki/Vector_Space_Model">
+       /// Vector Space Model (VSM) of Information Retrieval</a>.
+       /// A document whose vector is closer to the query vector in that model is scored higher.
+       /// 
+       /// The score is computed as follows:
+       /// 
+       /// <p/>
+       /// <table cellpadding="1" cellspacing="0" border="1" align="center">
+       /// <tr><td>
+       /// <table cellpadding="1" cellspacing="0" border="0" align="center">
+       /// <tr>
+       /// <td valign="middle" align="right" rowspan="1">
+       /// score(q,d) &#160; = &#160;
+       /// <A HREF="#formula_coord">coord(q,d)</A> &#160;&#183;&#160;
+       /// <A HREF="#formula_queryNorm">queryNorm(q)</A> &#160;&#183;&#160;
+       /// </td>
+       /// <td valign="bottom" align="center" rowspan="1">
+       /// <big><big><big>&#8721;</big></big></big>
+       /// </td>
+       /// <td valign="middle" align="right" rowspan="1">
+       /// <big><big>(</big></big>
+       /// <A HREF="#formula_tf">tf(t in d)</A> &#160;&#183;&#160;
+       /// <A HREF="#formula_idf">idf(t)</A><sup>2</sup> &#160;&#183;&#160;
+       /// <A HREF="#formula_termBoost">t.getBoost()</A>&#160;&#183;&#160;
+       /// <A HREF="#formula_norm">norm(t,d)</A>
+       /// <big><big>)</big></big>
+       /// </td>
+       /// </tr>
+	/// <tr valign="top">
+       /// <td></td>
+       /// <td align="center"><small>t in q</small></td>
+       /// <td></td>
+       /// </tr>
+       /// </table>
+       /// </td></tr>
+       /// </table>
+       /// 
+       /// <p/> where
+       /// <ol>
+       /// <li>
+       /// <A NAME="formula_tf"></A>
+       /// <b>tf(t in d)</b>
+       /// correlates to the term's <i>frequency</i>,
+       /// defined as the number of times term <i>t</i> appears in the currently scored document <i>d</i>.
+       /// Documents that have more occurrences of a given term receive a higher score.
+       /// The default computation for <i>tf(t in d)</i> in
+       /// {@link Mono.Lucene.Net.Search.DefaultSimilarity#Tf(float) DefaultSimilarity} is:
+       /// 
+       /// <br/>&#160;<br/>
+       /// <table cellpadding="2" cellspacing="2" border="0" align="center">
+       /// <tr>
+       /// <td valign="middle" align="right" rowspan="1">
+       /// {@link Mono.Lucene.Net.Search.DefaultSimilarity#Tf(float) tf(t in d)} &#160; = &#160;
+       /// </td>
+       /// <td valign="top" align="center" rowspan="1">
+       /// frequency<sup><big>&#189;</big></sup>
+       /// </td>
+       /// </tr>
+       /// </table>
+       /// <br/>&#160;<br/>
+       /// </li>
+       /// 
+       /// <li>
+       /// <A NAME="formula_idf"></A>
+       /// <b>idf(t)</b> stands for Inverse Document Frequency. This value
+       /// correlates to the inverse of <i>docFreq</i>
+       /// (the number of documents in which the term <i>t</i> appears).
+	/// This means rarer terms contribute more to the total score.
+       /// The default computation for <i>idf(t)</i> in
+       /// {@link Mono.Lucene.Net.Search.DefaultSimilarity#Idf(int, int) DefaultSimilarity} is:
+       /// 
+       /// <br/>&#160;<br/>
+       /// <table cellpadding="2" cellspacing="2" border="0" align="center">
+       /// <tr>
+       /// <td valign="middle" align="right">
+       /// {@link Mono.Lucene.Net.Search.DefaultSimilarity#Idf(int, int) idf(t)}&#160; = &#160;
+       /// </td>
+       /// <td valign="middle" align="center">
+       /// 1 + log <big>(</big>
+       /// </td>
+       /// <td valign="middle" align="center">
+       /// <table>
+       /// <tr><td align="center"><small>numDocs</small></td></tr>
+       /// <tr><td align="center">&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;</td></tr>
+       /// <tr><td align="center"><small>docFreq+1</small></td></tr>
+       /// </table>
+       /// </td>
+       /// <td valign="middle" align="center">
+       /// <big>)</big>
+       /// </td>
+       /// </tr>
+       /// </table>
+       /// <br/>&#160;<br/>
+       /// </li>
+       /// 
+       /// <li>
+       /// <A NAME="formula_coord"></A>
+       /// <b>coord(q,d)</b>
+       /// is a score factor based on how many of the query terms are found in the specified document.
+       /// Typically, a document that contains more of the query's terms will receive a higher score
+       /// than another document with fewer query terms.
+       /// This is a search time factor computed in
+       /// {@link #Coord(int, int) coord(q,d)}
+       /// by the Similarity in effect at search time.
+       /// <br/>&#160;<br/>
+       /// </li>
+       /// 
+       /// <li><b>
+       /// <A NAME="formula_queryNorm"></A>
+       /// queryNorm(q)
+       /// </b>
+       /// is a normalizing factor used to make scores between queries comparable.
+       /// This factor does not affect document ranking (since all ranked documents are multiplied by the same factor),
+       /// but rather just attempts to make scores from different queries (or even different indexes) comparable.
+       /// This is a search time factor computed by the Similarity in effect at search time.
+       /// 
+       /// The default computation in
+       /// {@link Mono.Lucene.Net.Search.DefaultSimilarity#QueryNorm(float) DefaultSimilarity}
+       /// is:
+       /// <br/>&#160;<br/>
+       /// <table cellpadding="1" cellspacing="0" border="0" align="center">
+       /// <tr>
+       /// <td valign="middle" align="right" rowspan="1">
+       /// queryNorm(q)  &#160; = &#160;
+       /// {@link Mono.Lucene.Net.Search.DefaultSimilarity#QueryNorm(float) queryNorm(sumOfSquaredWeights)}
+       /// &#160; = &#160;
+       /// </td>
+       /// <td valign="middle" align="center" rowspan="1">
+       /// <table>
+       /// <tr><td align="center"><big>1</big></td></tr>
+       /// <tr><td align="center"><big>
+       /// &#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;&#8211;
+       /// </big></td></tr>
+       /// <tr><td align="center">sumOfSquaredWeights<sup><big>&#189;</big></sup></td></tr>
+       /// </table>
+       /// </td>
+       /// </tr>
+       /// </table>
+       /// <br/>&#160;<br/>
+       /// 
+       /// The sum of squared weights (of the query terms) is
+       /// computed by the query {@link Mono.Lucene.Net.Search.Weight} object.
+       /// For example, a {@link Mono.Lucene.Net.Search.BooleanQuery boolean query}
+       /// computes this value as:
+       /// 
+       /// <br/>&#160;<br/>
+       /// <table cellpadding="1" cellspacing="0" border="0" align="center">
+       /// <tr>
+       /// <td valign="middle" align="right" rowspan="1">
+       /// {@link Mono.Lucene.Net.Search.Weight#SumOfSquaredWeights() sumOfSquaredWeights} &#160; = &#160;
+       /// {@link Mono.Lucene.Net.Search.Query#GetBoost() q.getBoost()} <sup><big>2</big></sup>
+       /// &#160;&#183;&#160;
+       /// </td>
+       /// <td valign="bottom" align="center" rowspan="1">
+       /// <big><big><big>&#8721;</big></big></big>
+       /// </td>
+       /// <td valign="middle" align="right" rowspan="1">
+       /// <big><big>(</big></big>
+       /// <A HREF="#formula_idf">idf(t)</A> &#160;&#183;&#160;
+       /// <A HREF="#formula_termBoost">t.getBoost()</A>
+       /// <big><big>) <sup>2</sup> </big></big>
+       /// </td>
+       /// </tr>
+	/// <tr valign="top">
+       /// <td></td>
+       /// <td align="center"><small>t in q</small></td>
+       /// <td></td>
+       /// </tr>
+       /// </table>
+       /// <br/>&#160;<br/>
+       /// 
+       /// </li>
+       /// 
+       /// <li>
+       /// <A NAME="formula_termBoost"></A>
+       /// <b>t.getBoost()</b>
+       /// is a search time boost of term <i>t</i> in the query <i>q</i> as
+       /// specified in the query text
+       /// (see <A HREF="../../../../../../queryparsersyntax.html#Boosting a Term">query syntax</A>),
+       /// or as set by application calls to
+       /// {@link Mono.Lucene.Net.Search.Query#SetBoost(float) setBoost()}.
+	/// Notice that there is really no direct API for accessing the boost of one term in a multi-term query;
+	/// rather, the terms are represented in a query as multiple
+       /// {@link Mono.Lucene.Net.Search.TermQuery TermQuery} objects,
+       /// and so the boost of a term in the query is accessible by calling the sub-query
+       /// {@link Mono.Lucene.Net.Search.Query#GetBoost() getBoost()}.
+       /// <br/>&#160;<br/>
+       /// </li>
+       /// 
+       /// <li>
+       /// <A NAME="formula_norm"></A>
+       /// <b>norm(t,d)</b> encapsulates a few (indexing time) boost and length factors:
+       /// 
+       /// <ul>
+       /// <li><b>Document boost</b> - set by calling
+       /// {@link Mono.Lucene.Net.Documents.Document#SetBoost(float) doc.setBoost()}
+       /// before adding the document to the index.
+       /// </li>
+       /// <li><b>Field boost</b> - set by calling
+       /// {@link Mono.Lucene.Net.Documents.Fieldable#SetBoost(float) field.setBoost()}
+       /// before adding the field to a document.
+       /// </li>
+       /// <li>{@link #LengthNorm(String, int) <b>lengthNorm</b>(field)} - computed
+       /// when the document is added to the index in accordance with the number of tokens
+       /// of this field in the document, so that shorter fields contribute more to the score.
+	/// LengthNorm is computed by the Similarity class in effect at indexing time.
+       /// </li>
+       /// </ul>
+       /// 
+       /// <p/>
+       /// When a document is added to the index, all the above factors are multiplied.
+       /// If the document has multiple fields with the same name, all their boosts are multiplied together:
+       /// 
+       /// <br/>&#160;<br/>
+       /// <table cellpadding="1" cellspacing="0" border="0" align="center">
+       /// <tr>
+       /// <td valign="middle" align="right" rowspan="1">
+       /// norm(t,d) &#160; = &#160;
+       /// {@link Mono.Lucene.Net.Documents.Document#GetBoost() doc.getBoost()}
+       /// &#160;&#183;&#160;
+       /// {@link #LengthNorm(String, int) lengthNorm(field)}
+       /// &#160;&#183;&#160;
+       /// </td>
+       /// <td valign="bottom" align="center" rowspan="1">
+       /// <big><big><big>&#8719;</big></big></big>
+       /// </td>
+       /// <td valign="middle" align="right" rowspan="1">
+       /// {@link Mono.Lucene.Net.Documents.Fieldable#GetBoost() f.getBoost}()
+       /// </td>
+       /// </tr>
+       /// <tr valign="top">
+       /// <td></td>
+       /// <td align="center"><small>field <i><b>f</b></i> in <i>d</i> named as <i><b>t</b></i></small></td>
+       /// <td></td>
+       /// </tr>
+       /// </table>
+       /// <br/>&#160;<br/>
+       /// However, the resulting <i>norm</i> value is {@link #EncodeNorm(float) encoded} as a single byte
+       /// before being stored.
+       /// At search time, the norm byte value is read from the index
+       /// {@link Mono.Lucene.Net.Store.Directory directory} and
+       /// {@link #DecodeNorm(byte) decoded} back to a float <i>norm</i> value.
+       /// This encoding/decoding, while reducing index size, comes at the price of
+       /// precision loss - it is not guaranteed that decode(encode(x)) = x.
+       /// For instance, decode(encode(0.89)) = 0.75.
+       /// Also notice that search time is too late to modify this <i>norm</i> part of scoring, e.g. by
+       /// using a different {@link Similarity} for search.
+       /// <br/>&#160;<br/>
+       /// </li>
+       /// </ol>
+       /// 
+       /// </summary>
+       /// <seealso cref="SetDefault(Similarity)">
+       /// </seealso>
+       /// <seealso cref="Mono.Lucene.Net.Index.IndexWriter.SetSimilarity(Similarity)">
+       /// </seealso>
+       /// <seealso cref="Searcher.SetSimilarity(Similarity)">
+       /// </seealso>
+       [Serializable]
+       public abstract class Similarity
+       {
+               public Similarity()
+               {
+                       InitBlock();
+               }
+               [Serializable]
+               private class AnonymousClassIDFExplanation:IDFExplanation
+               {
+                       public AnonymousClassIDFExplanation(float idf, Similarity enclosingInstance)
+                       {
+                               InitBlock(idf, enclosingInstance);
+                       }
+                       private void  InitBlock(float idf, Similarity enclosingInstance)
+                       {
+                               this.idf = idf;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private float idf;
+                       private Similarity enclosingInstance;
+                       public Similarity Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       //@Override
+                       public override float GetIdf()
+                       {
+                               return idf;
+                       }
+                       //@Override
+                       public override System.String Explain()
+                       {
+                               return "Inexplicable";
+                       }
+               }
+               [Serializable]
+               private class AnonymousClassIDFExplanation1:IDFExplanation
+               {
+                       public AnonymousClassIDFExplanation1(int df, int max, float idf, Similarity enclosingInstance)
+                       {
+                               InitBlock(df, max, idf, enclosingInstance);
+                       }
+                       private void  InitBlock(int df, int max, float idf, Similarity enclosingInstance)
+                       {
+                               this.df = df;
+                               this.max = max;
+                               this.idf = idf;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private int df;
+                       private int max;
+                       private float idf;
+                       private Similarity enclosingInstance;
+                       public Similarity Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       //@Override
+                       public override System.String Explain()
+                       {
+                               return "idf(docFreq=" + df + ", maxDocs=" + max + ")";
+                       }
+                       //@Override
+                       public override float GetIdf()
+                       {
+                               return idf;
+                       }
+               }
+               [Serializable]
+               private class AnonymousClassIDFExplanation2:IDFExplanation
+               {
+                       public AnonymousClassIDFExplanation2(float idf, Similarity enclosingInstance)
+                       {
+                               InitBlock(idf, enclosingInstance);
+                       }
+                       private void  InitBlock(float idf, Similarity enclosingInstance)
+                       {
+                               this.idf = idf;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private float idf;
+                       private Similarity enclosingInstance;
+                       public Similarity Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       //@Override
+                       public override float GetIdf()
+                       {
+                               return idf;
+                       }
+                       //@Override
+                       public override System.String Explain()
+                       {
+                               return "Inexplicable";
+                       }
+               }
+               [Serializable]
+               private class AnonymousClassIDFExplanation3:IDFExplanation
+               {
+                       public AnonymousClassIDFExplanation3(float fIdf, System.Text.StringBuilder exp, Similarity enclosingInstance)
+                       {
+                               InitBlock(fIdf, exp, enclosingInstance);
+                       }
+                       private void  InitBlock(float fIdf, System.Text.StringBuilder exp, Similarity enclosingInstance)
+                       {
+                               this.fIdf = fIdf;
+                               this.exp = exp;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private float fIdf;
+                       private System.Text.StringBuilder exp;
+                       private Similarity enclosingInstance;
+                       public Similarity Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       //@Override
+                       public override float GetIdf()
+                       {
+                               return fIdf;
+                       }
+                       //@Override
+                       public override System.String Explain()
+                       {
+                               return exp.ToString();
+                       }
+               }
+               private void  InitBlock()
+               {
+                       SupportedMethods = GetSupportedMethods(this.GetType());
+               }
+               
+               public const int NO_DOC_ID_PROVIDED = -1;
+               
+               /// <summary>Set the default Similarity implementation used by indexing and search
+               /// code.
+               /// 
+               /// </summary>
+               /// <seealso cref="Searcher.SetSimilarity(Similarity)">
+               /// </seealso>
+               /// <seealso cref="Mono.Lucene.Net.Index.IndexWriter.SetSimilarity(Similarity)">
+               /// </seealso>
+               public static void  SetDefault(Similarity similarity)
+               {
+                       Similarity.defaultImpl = similarity;
+               }
+               
+               /// <summary>Return the default Similarity implementation used by indexing and search
+               /// code.
+               /// 
+               /// <p/>This is initially an instance of {@link DefaultSimilarity}.
+               /// 
+               /// </summary>
+               /// <seealso cref="Searcher.SetSimilarity(Similarity)">
+               /// </seealso>
+               /// <seealso cref="Mono.Lucene.Net.Index.IndexWriter.SetSimilarity(Similarity)">
+               /// </seealso>
+               public static Similarity GetDefault()
+               {
+                       return Similarity.defaultImpl;
+               }
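+
+               // Usage sketch (illustrative, not part of the upstream source): install a
+               // custom Similarity process-wide before indexing or searching.
+               //
+               //     Similarity.SetDefault(new DefaultSimilarity());
+               //     Similarity current = Similarity.GetDefault();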
+               
+               /// <summary>Cache of decoded bytes. </summary>
+               private static readonly float[] NORM_TABLE = new float[256];
+               
+               /// <summary>Decodes a normalization factor stored in an index.</summary>
+               /// <seealso cref="EncodeNorm(float)">
+               /// </seealso>
+               public static float DecodeNorm(byte b)
+               {
+                       return NORM_TABLE[b & 0xFF]; // & 0xFF maps negative bytes to positive above 127
+               }
+               
+               /// <summary>Returns a table for decoding normalization bytes.</summary>
+               /// <seealso cref="EncodeNorm(float)">
+               /// </seealso>
+               public static float[] GetNormDecoder()
+               {
+                       return NORM_TABLE;
+               }
+               
+               /// <summary> Compute the normalization value for a field, given the accumulated
+               /// state of term processing for this field (see {@link FieldInvertState}).
+               /// 
+               /// <p/>Implementations should calculate a float value based on the field
+               /// state and then return that value.
+               /// 
+               /// <p/>For backward compatibility this method by default calls
+               /// {@link #LengthNorm(String, int)} passing
+               /// {@link FieldInvertState#GetLength()} as the second argument, and
+               /// then multiplies this value by {@link FieldInvertState#GetBoost()}.<p/>
+               /// 
+               /// <p/><b>WARNING</b>: This API is new and experimental and may
+               /// suddenly change.<p/>
+               /// 
+               /// </summary>
+               /// <param name="field">field name
+               /// </param>
+               /// <param name="state">current processing state for this field
+               /// </param>
+               /// <returns> the calculated float norm
+               /// </returns>
+               public virtual float ComputeNorm(System.String field, FieldInvertState state)
+               {
+                       return (float) (state.GetBoost() * LengthNorm(field, state.GetLength()));
+               }
+               
+               /// <summary>Computes the normalization value for a field given the total number of
+               /// terms contained in a field.  These values, together with field boosts, are
+               /// stored in an index and multiplied into scores for hits on each field by the
+               /// search code.
+               /// 
+               /// <p/>Matches in longer fields are less precise, so implementations of this
+               /// method usually return smaller values when <code>numTokens</code> is large,
+               /// and larger values when <code>numTokens</code> is small.
+               /// 
+               /// <p/>Note that the return values are computed under 
+               /// {@link Mono.Lucene.Net.Index.IndexWriter#AddDocument(Mono.Lucene.Net.Documents.Document)} 
+               /// and then stored using
+               /// {@link #EncodeNorm(float)}.  
+               /// Thus they have limited precision, and documents
+               /// must be re-indexed if this method is altered.
+               /// 
+               /// </summary>
+               /// <param name="fieldName">the name of the field
+               /// </param>
+               /// <param name="numTokens">the total number of tokens contained in fields named
+               /// <i>fieldName</i> of <i>doc</i>.
+               /// </param>
+               /// <returns> a normalization factor for hits on this field of this document
+               /// 
+               /// </returns>
+               /// <seealso cref="Mono.Lucene.Net.Documents.Field.SetBoost(float)">
+               /// </seealso>
+               public abstract float LengthNorm(System.String fieldName, int numTokens);
+               
+               /// <summary>Computes the normalization value for a query given the sum of the squared
+               /// weights of each of the query terms.  This value is then multiplied into the
+               /// weight of each query term.
+               /// 
+               /// <p/>This does not affect ranking, but rather just attempts to make scores
+               /// from different queries comparable.
+               /// 
+               /// </summary>
+               /// <param name="sumOfSquaredWeights">the sum of the squares of query term weights
+               /// </param>
+               /// <returns> a normalization factor for query weights
+               /// </returns>
+               public abstract float QueryNorm(float sumOfSquaredWeights);
+               
+               /// <summary>Encodes a normalization factor for storage in an index.
+               /// 
+               /// <p/>The encoding uses a three-bit mantissa, a five-bit exponent, and
+               /// the zero-exponent point at 15, thus
+               /// representing values from around 7x10^9 to 2x10^-9 with about one
+               /// significant decimal digit of accuracy.  Zero is also represented.
+               /// Negative numbers are rounded up to zero.  Values too large to represent
+               /// are rounded down to the largest representable value.  Positive values too
+               /// small to represent are rounded up to the smallest positive representable
+               /// value.
+               /// 
+               /// </summary>
+               /// <seealso cref="Mono.Lucene.Net.Documents.Field.SetBoost(float)">
+               /// </seealso>
+               /// <seealso cref="Mono.Lucene.Net.Util.SmallFloat">
+               /// </seealso>
+               public static byte EncodeNorm(float f)
+               {
+                       return (byte) SmallFloat.FloatToByte315(f);
+               }
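+
+               // Illustration (ours, not upstream code): the single-byte encoding is lossy,
+               // so round trips only approximate the input, matching the example in the
+               // class comment above:
+               //
+               //     byte b = Similarity.EncodeNorm(0.89f);
+               //     float f = Similarity.DecodeNorm(b); // 0.75f, not 0.89f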
+               
+               
+               /// <summary>Computes a score factor based on a term or phrase's frequency in a
+               /// document.  This value is multiplied by the {@link #Idf(Term, Searcher)}
+               /// factor for each term in the query and these products are then summed to
+               /// form the initial score for a document.
+               /// 
+               /// <p/>Terms and phrases repeated in a document indicate the topic of the
+               /// document, so implementations of this method usually return larger values
+               /// when <code>freq</code> is large, and smaller values when <code>freq</code>
+               /// is small.
+               /// 
+               /// <p/>The default implementation calls {@link #Tf(float)}.
+               /// 
+               /// </summary>
+               /// <param name="freq">the frequency of a term within a document
+               /// </param>
+               /// <returns> a score factor based on a term's within-document frequency
+               /// </returns>
+               public virtual float Tf(int freq)
+               {
+                       return Tf((float) freq);
+               }
+               
+               /// <summary>Computes the amount of a sloppy phrase match, based on an edit distance.
+               /// This value is summed for each sloppy phrase match in a document to form
+               /// the frequency that is passed to {@link #Tf(float)}.
+               /// 
+               /// <p/>A phrase match with a small edit distance to a document passage more
+               /// closely matches the document, so implementations of this method usually
+               /// return larger values when the edit distance is small and smaller values
+               /// when it is large.
+               /// 
+               /// </summary>
+               /// <seealso cref="PhraseQuery.SetSlop(int)">
+               /// </seealso>
+               /// <param name="distance">the edit distance of this sloppy phrase match
+               /// </param>
+               /// <returns> the frequency increment for this match
+               /// </returns>
+               public abstract float SloppyFreq(int distance);
+               
+               /// <summary>Computes a score factor based on a term or phrase's frequency in a
+               /// document.  This value is multiplied by the {@link #Idf(Term, Searcher)}
+               /// factor for each term in the query and these products are then summed to
+               /// form the initial score for a document.
+               /// 
+               /// <p/>Terms and phrases repeated in a document indicate the topic of the
+               /// document, so implementations of this method usually return larger values
+               /// when <code>freq</code> is large, and smaller values when <code>freq</code>
+               /// is small.
+               /// 
+               /// </summary>
+               /// <param name="freq">the frequency of a term within a document
+               /// </param>
+               /// <returns> a score factor based on a term's within-document frequency
+               /// </returns>
+               public abstract float Tf(float freq);
+               
+               /// <summary>Computes a score factor for a simple term.
+               /// 
+               /// <p/>The default implementation is:<pre>
+               /// return idf(searcher.docFreq(term), searcher.maxDoc());
+               /// </pre>
+               /// 
+               /// Note that {@link Searcher#MaxDoc()} is used instead of
+               /// {@link Mono.Lucene.Net.Index.IndexReader#NumDocs()} because it is proportional to
+               /// {@link Searcher#DocFreq(Term)} , i.e., when one is inaccurate,
+               /// so is the other, and in the same direction.
+               /// 
+               /// </summary>
+               /// <param name="term">the term in question
+               /// </param>
+               /// <param name="searcher">the document collection being searched
+               /// </param>
+               /// <returns> a score factor for the term
+               /// </returns>
+               /// <deprecated> see {@link #IdfExplain(Term, Searcher)}
+               /// </deprecated>
+        [Obsolete("see IdfExplain(Term, Searcher)")]
+               public virtual float Idf(Term term, Searcher searcher)
+               {
+                       return Idf(searcher.DocFreq(term), searcher.MaxDoc());
+               }
+               
+               /// <summary> Computes a score factor for a simple term and returns an explanation
+               /// for that score factor.
+               /// 
+               /// <p/>
+               /// The default implementation uses:
+               /// 
+               /// <pre>
+               /// idf(searcher.docFreq(term), searcher.maxDoc());
+               /// </pre>
+               /// 
+               /// Note that {@link Searcher#MaxDoc()} is used instead of
+               /// {@link Mono.Lucene.Net.Index.IndexReader#NumDocs()} because it is
+               /// proportional to {@link Searcher#DocFreq(Term)} , i.e., when one is
+               /// inaccurate, so is the other, and in the same direction.
+               /// 
+               /// </summary>
+               /// <param name="term">the term in question
+               /// </param>
+               /// <param name="searcher">the document collection being searched
+               /// </param>
+               /// <returns> an IDFExplain object that includes both an idf score factor 
+               /// and an explanation for the term.
+               /// </returns>
+               /// <throws>  IOException </throws>
+               public virtual IDFExplanation IdfExplain(Term term, Searcher searcher)
+               {
+                       if (SupportedMethods.overridesTermIDF)
+                       {
+                               float idf = Idf(term, searcher);
+                               return new AnonymousClassIDFExplanation(idf, this);
+                       }
+                       int df = searcher.DocFreq(term);
+                       int max = searcher.MaxDoc();
+                       float idf2 = Idf(df, max);
+                       return new AnonymousClassIDFExplanation1(df, max, idf2, this);
+               }
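+
+               // Usage sketch (illustrative; the variable, field, and term names are made up):
+               //
+               //     IDFExplanation e = similarity.IdfExplain(new Term("body", "lucene"), searcher);
+               //     float idf = e.GetIdf();
+               //     System.String why = e.Explain(); // e.g. "idf(docFreq=42, maxDocs=1000)"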
+               
+               /// <summary>Computes a score factor for a phrase.
+               /// 
+               /// <p/>The default implementation sums the {@link #Idf(Term,Searcher)} factor
+               /// for each term in the phrase.
+               /// 
+               /// </summary>
+               /// <param name="terms">the terms in the phrase
+               /// </param>
+               /// <param name="searcher">the document collection being searched
+               /// </param>
+               /// <returns> idf score factor
+               /// </returns>
+               /// <deprecated> see {@link #idfExplain(Collection, Searcher)}
+               /// </deprecated>
+        [Obsolete("see IdfExplain(Collection, Searcher)")]
+               public virtual float Idf(System.Collections.ICollection terms, Searcher searcher)
+               {
+                       float idf = 0.0f;
+                       System.Collections.IEnumerator i = terms.GetEnumerator();
+                       while (i.MoveNext())
+                       {
+                               idf += Idf((Term) i.Current, searcher);
+                       }
+                       return idf;
+               }
+               
+               /// <summary> Computes a score factor for a phrase.
+               /// 
+               /// <p/>
+               /// The default implementation sums the idf factor for
+               /// each term in the phrase.
+               /// 
+               /// </summary>
+               /// <param name="terms">the terms in the phrase
+               /// </param>
+               /// <param name="searcher">the document collection being searched
+               /// </param>
+               /// <returns> an IDFExplain object that includes both an idf 
+               /// score factor for the phrase and an explanation 
+               /// for each term.
+               /// </returns>
+               /// <throws>  IOException </throws>
+               public virtual IDFExplanation idfExplain(System.Collections.ICollection terms, Searcher searcher)
+               {
+                       if (SupportedMethods.overridesCollectionIDF)
+                       {
+                               float idf = Idf(terms, searcher);
+                               return new AnonymousClassIDFExplanation2(idf, this);
+                       }
+                       int max = searcher.MaxDoc();
+                       float idf2 = 0.0f;
+                       System.Text.StringBuilder exp = new System.Text.StringBuilder();
+            foreach (Term term in terms)
+                       {
+                               int df = searcher.DocFreq(term);
+                               idf2 += Idf(df, max);
+                               exp.Append(" ");
+                               exp.Append(term.Text());
+                               exp.Append("=");
+                               exp.Append(df);
+                       }
+                       float fIdf = idf2;
+                       return new AnonymousClassIDFExplanation3(fIdf, exp, this);
+               }
+               
+               /// <summary>Computes a score factor based on a term's document frequency (the number
+               /// of documents which contain the term).  This value is multiplied by the
+               /// {@link #Tf(int)} factor for each term in the query and these products are
+               /// then summed to form the initial score for a document.
+               /// 
+               /// <p/>Terms that occur in fewer documents are better indicators of topic, so
+               /// implementations of this method usually return larger values for rare terms,
+               /// and smaller values for common terms.
+               /// 
+               /// </summary>
+               /// <param name="docFreq">the number of documents which contain the term
+               /// </param>
+               /// <param name="numDocs">the total number of documents in the collection
+               /// </param>
+               /// <returns> a score factor based on the term's document frequency
+               /// </returns>
+               public abstract float Idf(int docFreq, int numDocs);
+               
+               /// <summary>Computes a score factor based on the fraction of all query terms that a
+               /// document contains.  This value is multiplied into scores.
+               /// 
+               /// <p/>The presence of a large portion of the query terms indicates a better
+               /// match with the query, so implementations of this method usually return
+               /// larger values when the ratio between these parameters is large and smaller
+               /// values when the ratio between them is small.
+               /// 
+               /// </summary>
+               /// <param name="overlap">the number of query terms matched in the document
+               /// </param>
+               /// <param name="maxOverlap">the total number of terms in the query
+               /// </param>
+               /// <returns> a score factor based on term overlap with the query
+               /// </returns>
+               public abstract float Coord(int overlap, int maxOverlap);
+               
+               
+               
+               
+               /// <summary> Calculate a scoring factor based on the data in the payload.  Overriding implementations
+               /// are responsible for interpreting what is in the payload.  Lucene makes no assumptions about
+               /// what is in the byte array.
+               /// <p/>
+               /// The default implementation returns 1.
+               /// 
+               /// </summary>
+               /// <param name="fieldName">The fieldName of the term this payload belongs to
+               /// </param>
+               /// <param name="payload">The payload byte array to be scored
+               /// </param>
+               /// <param name="offset">The offset into the payload array
+               /// </param>
+               /// <param name="length">The length in the array
+               /// </param>
+               /// <returns> An implementation dependent float to be used as a scoring factor
+               /// 
+               /// </returns>
+               /// <deprecated> See {@link #ScorePayload(int, String, int, int, byte[], int, int)}
+               /// </deprecated>
+               //TODO: When removing this, set the default value below to return 1.
+        [Obsolete("See ScorePayload(int, String, int, int, byte[], int, int)")]
+               public virtual float ScorePayload(System.String fieldName, byte[] payload, int offset, int length)
+               {
+                       //Do nothing
+                       return 1;
+               }
+               
+               /// <summary> Calculate a scoring factor based on the data in the payload.  Overriding implementations
+               /// are responsible for interpreting what is in the payload.  Lucene makes no assumptions about
+               /// what is in the byte array.
+               /// <p/>
+               /// The default implementation returns 1.
+               /// 
+               /// </summary>
+               /// <param name="docId">The docId currently being scored.  If this value is {@link #NO_DOC_ID_PROVIDED}, then it should be assumed that the PayloadQuery implementation does not provide document information
+               /// </param>
+               /// <param name="fieldName">The fieldName of the term this payload belongs to
+               /// </param>
+               /// <param name="start">The start position of the payload
+               /// </param>
+               /// <param name="end">The end position of the payload
+               /// </param>
+               /// <param name="payload">The payload byte array to be scored
+               /// </param>
+               /// <param name="offset">The offset into the payload array
+               /// </param>
+               /// <param name="length">The length in the array
+               /// </param>
+               /// <returns> An implementation dependent float to be used as a scoring factor
+               /// 
+               /// </returns>
+               public virtual float ScorePayload(int docId, System.String fieldName, int start, int end, byte[] payload, int offset, int length)
+               {
+                       //TODO: When removing the deprecated scorePayload above, set this to return 1
+                       return ScorePayload(fieldName, payload, offset, length);
+               }
+               
+               /// <deprecated> Remove this when old API is removed! 
+               /// </deprecated>
+        [Obsolete("Remove this when old API is removed! ")]
+               private MethodSupport SupportedMethods;
+               
+               /// <deprecated> Remove this when old API is removed! 
+               /// </deprecated>
+        [Obsolete("Remove this when old API is removed! ")]
+               [Serializable]
+               private sealed class MethodSupport
+               {
+                       internal bool overridesCollectionIDF;
+                       internal bool overridesTermIDF;
+                       
+                       internal MethodSupport(System.Type clazz)
+                       {
+                               overridesCollectionIDF = IsMethodOverridden(clazz, "Idf", C_IDF_METHOD_PARAMS);
+                               overridesTermIDF = IsMethodOverridden(clazz, "Idf", T_IDF_METHOD_PARAMS);
+                       }
+                       
+                       private static bool IsMethodOverridden(System.Type clazz, System.String name, System.Type[] params_Renamed)
+                       {
+                               try
+                               {
+                                       return clazz.GetMethod(name, (params_Renamed == null) ? new System.Type[0] : (System.Type[]) params_Renamed).DeclaringType != typeof(Similarity);
+                               }
+                               catch (System.MethodAccessException e)
+                               {
+                                       // should not happen
+                                       throw new System.SystemException(e.Message, e);
+                               }
+                       }
+                       /// <deprecated> Remove this when old API is removed! 
+                       /// </deprecated>
+            [Obsolete("Remove this when old API is removed! ")]
+                       private static readonly System.Type[] T_IDF_METHOD_PARAMS = new System.Type[]{typeof(Term), typeof(Searcher)};
+                       
+                       /// <deprecated> Remove this when old API is removed! 
+                       /// </deprecated>
+            [Obsolete("Remove this when old API is removed! ")]
+                       private static readonly System.Type[] C_IDF_METHOD_PARAMS = new System.Type[]{typeof(System.Collections.ICollection), typeof(Searcher)};
+               }
+               
+               /// <deprecated> Remove this when old API is removed! 
+               /// </deprecated>
+        [Obsolete("Remove this when old API is removed! ")]
+               private static readonly System.Collections.Hashtable knownMethodSupport = new System.Collections.Hashtable();
+
+        // {{Aroush-2.9 Port issue, need to mimic java's IdentityHashMap
+        /*
+         * From Java docs:
+         * This class implements the Map interface with a hash table, using 
+         * reference-equality in place of object-equality when comparing keys 
+         * (and values). In other words, in an IdentityHashMap, two keys k1 and k2 
+         * are considered equal if and only if (k1==k2). (In normal Map 
+         * implementations (like HashMap) two keys k1 and k2 are considered 
+         * equal if and only if (k1==null ? k2==null : k1.equals(k2)).) 
+         */
+        // Aroush-2.9}}
+               
+               /// <deprecated> Remove this when old API is removed! 
+               /// </deprecated>
+        [Obsolete("Remove this when old API is removed! ")]
+               private static MethodSupport GetSupportedMethods(System.Type clazz)
+               {
+                       MethodSupport supportedMethods;
+                       lock (knownMethodSupport)
+                       {
+                               supportedMethods = (MethodSupport) knownMethodSupport[clazz];
+                               if (supportedMethods == null)
+                               {
+                                       knownMethodSupport.Add(clazz, supportedMethods = new MethodSupport(clazz));
+                               }
+                       }
+                       return supportedMethods;
+               }
+               
+               /// <summary>The Similarity implementation used by default. 
+               /// TODO: move back to top when old API is removed! 
+               /// 
+               /// </summary>
+               private static Similarity defaultImpl = new DefaultSimilarity();
+               static Similarity()
+               {
+                       {
+                               for (int i = 0; i < 256; i++)
+                                       NORM_TABLE[i] = SmallFloat.Byte315ToFloat((byte) i);
+                       }
+               }
+       }
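+
+       // A minimal concrete subclass, added purely as an illustration (it is not part
+       // of the upstream Lucene.Net source): each abstract factor below uses the
+       // classic Lucene formula, which is essentially what DefaultSimilarity implements.
+       [Serializable]
+       public class ExampleSimilarity:Similarity
+       {
+               // Shorter fields contribute more to the score: 1/sqrt(numTokens).
+               public override float LengthNorm(System.String fieldName, int numTokens)
+               {
+                       return (float) (1.0 / System.Math.Sqrt(numTokens));
+               }
+
+               // Make scores from different queries comparable: 1/sqrt(sumOfSquaredWeights).
+               public override float QueryNorm(float sumOfSquaredWeights)
+               {
+                       return (float) (1.0 / System.Math.Sqrt(sumOfSquaredWeights));
+               }
+
+               // Closer sloppy phrase matches contribute more: 1/(distance + 1).
+               public override float SloppyFreq(int distance)
+               {
+                       return 1.0f / (distance + 1);
+               }
+
+               // Repeated terms indicate the topic: sqrt(freq).
+               public override float Tf(float freq)
+               {
+                       return (float) System.Math.Sqrt(freq);
+               }
+
+               // Rare terms discriminate better: log(numDocs / (docFreq + 1)) + 1.
+               public override float Idf(int docFreq, int numDocs)
+               {
+                       return (float) (System.Math.Log(numDocs / (double) (docFreq + 1)) + 1.0);
+               }
+
+               // Reward documents that match more of the query's terms.
+               public override float Coord(int overlap, int maxOverlap)
+               {
+                       return overlap / (float) maxOverlap;
+               }
+       }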
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SimilarityDelegator.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SimilarityDelegator.cs
new file mode 100644 (file)
index 0000000..1b1011d
--- /dev/null
@@ -0,0 +1,86 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using FieldInvertState = Mono.Lucene.Net.Index.FieldInvertState;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Expert: Delegating scoring implementation.  Useful in {@link
+       /// Query#GetSimilarity(Searcher)} implementations, to override only certain
+       /// methods of a Searcher's Similarity implementation.
+       /// </summary>
+       [Serializable]
+       public class SimilarityDelegator:Similarity
+       {
+               
+               private Similarity delegee;
+               
+               /// <summary>Construct a {@link Similarity} that delegates all methods to another.
+               /// 
+               /// </summary>
+               /// <param name="delegee">the Similarity implementation to delegate to
+               /// </param>
+               public SimilarityDelegator(Similarity delegee)
+               {
+                       this.delegee = delegee;
+               }
+               
+               public override float ComputeNorm(System.String fieldName, FieldInvertState state)
+               {
+                       return delegee.ComputeNorm(fieldName, state);
+               }
+               
+               public override float LengthNorm(System.String fieldName, int numTerms)
+               {
+                       return delegee.LengthNorm(fieldName, numTerms);
+               }
+               
+               public override float QueryNorm(float sumOfSquaredWeights)
+               {
+                       return delegee.QueryNorm(sumOfSquaredWeights);
+               }
+               
+               public override float Tf(float freq)
+               {
+                       return delegee.Tf(freq);
+               }
+               
+               public override float SloppyFreq(int distance)
+               {
+                       return delegee.SloppyFreq(distance);
+               }
+               
+               public override float Idf(int docFreq, int numDocs)
+               {
+                       return delegee.Idf(docFreq, numDocs);
+               }
+               
+               public override float Coord(int overlap, int maxOverlap)
+               {
+                       return delegee.Coord(overlap, maxOverlap);
+               }
+
+        [Obsolete("Mono.Lucene.Net-2.9.1. This method overrides obsolete member Mono.Lucene.Net.Search.Similarity.ScorePayload(string, byte[], int, int)")]
+               public override float ScorePayload(System.String fieldName, byte[] payload, int offset, int length)
+               {
+                       return delegee.ScorePayload(fieldName, payload, offset, length);
+               }
+       }
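+
+       // Illustrative subclass (ours, not part of the upstream source): override just
+       // one scoring factor and delegate everything else, which is the intended use
+       // of SimilarityDelegator.
+       [Serializable]
+       public class FlatTfSimilarity:SimilarityDelegator
+       {
+               public FlatTfSimilarity(Similarity delegee):base(delegee)
+               {
+               }
+
+               // Ignore term frequency; any number of occurrences counts the same.
+               public override float Tf(float freq)
+               {
+                       return freq > 0 ? 1.0f : 0.0f;
+               }
+       }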
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SloppyPhraseScorer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SloppyPhraseScorer.cs
new file mode 100644 (file)
index 0000000..b94b6f5
--- /dev/null
@@ -0,0 +1,243 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TermPositions = Mono.Lucene.Net.Index.TermPositions;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       sealed class SloppyPhraseScorer:PhraseScorer
+       {
+               private int slop;
+               private PhrasePositions[] repeats;
+               private PhrasePositions[] tmpPos; // for flipping repeating pps.
+               private bool checkedRepeats;
+               
+               internal SloppyPhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, Similarity similarity, int slop, byte[] norms):base(weight, tps, offsets, similarity, norms)
+               {
+                       this.slop = slop;
+               }
+               
+               /// <summary> Score a candidate doc for all slop-valid position-combinations (matches) 
+               /// encountered while traversing/hopping the PhrasePositions.
+               /// <br/> The score contribution of a match depends on the distance: 
+               /// <br/> - highest score for distance=0 (exact match).
+               /// <br/> - score gets lower as distance gets higher.
+               /// <br/>Example: for query "a b"~2, a document "x a b a y" can be scored twice: 
+               /// once for "a b" (distance=0), and once for "b a" (distance=2).
+               /// <br/>Possibly not all valid combinations are encountered, because for efficiency
+               /// we always propagate the least PhrasePosition. This allows the scorer to be based on a
+               /// PriorityQueue and to move forward faster.
+               /// As a result, for example, document "a b c b a"
+               /// would score differently for queries "a b c"~4 and "c b a"~4, although
+               /// they really are equivalent.
+               /// Similarly, for doc "a b c b a f g", query "c b"~2
+               /// would get the same score as "g f"~2, although "c b"~2 could be matched twice.
+               /// We may want to fix this in the future (currently we do not, for performance reasons).
+               /// </summary>
+               protected internal override float PhraseFreq()
+               {
+                       int end = InitPhrasePositions();
+                       
+                       float freq = 0.0f;
+                       bool done = (end < 0);
+                       while (!done)
+                       {
+                               PhrasePositions pp = (PhrasePositions) pq.Pop();
+                               int start = pp.position;
+                               int next = ((PhrasePositions) pq.Top()).position;
+                               
+                               bool tpsDiffer = true;
+                               for (int pos = start; pos <= next || !tpsDiffer; pos = pp.position)
+                               {
+                                       if (pos <= next && tpsDiffer)
+                                               start = pos; // advance pp to min window
+                                       if (!pp.NextPosition())
+                                       {
+                                               done = true; // ran out of a term -- done
+                                               break;
+                                       }
+                                       PhrasePositions pp2 = null;
+                                       tpsDiffer = !pp.repeats || (pp2 = TermPositionsDiffer(pp)) == null;
+                                       if (pp2 != null && pp2 != pp)
+                                       {
+                                               pp = Flip(pp, pp2); // flip pp to pp2
+                                       }
+                               }
+                               
+                               int matchLength = end - start;
+                               if (matchLength <= slop)
+                                       freq += GetSimilarity().SloppyFreq(matchLength); // score match
+                               
+                               if (pp.position > end)
+                                       end = pp.position;
+                               pq.Put(pp); // restore pq
+                       }
+                       
+                       return freq;
+               }
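+
+               // Worked example (ours): for query "a b"~2 against doc "x a b a y", the
+               // matches "a b" (distance=0) and "b a" (distance=2) yield, with
+               // DefaultSimilarity's SloppyFreq of 1/(distance+1),
+               // freq = 1/(0+1) + 1/(2+1) = 1.333f.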
+               
+               // flip pp2 and pp in the queue: pop until finding pp2, insert back all but pp2, insert pp back.
+               // assumes: pp!=pp2, pp2 in pq, pp not in pq.
+               // called only when there are repeating pps.
+               private PhrasePositions Flip(PhrasePositions pp, PhrasePositions pp2)
+               {
+                       int n = 0;
+                       PhrasePositions pp3;
+                       //pop until finding pp2
+                       while ((pp3 = (PhrasePositions) pq.Pop()) != pp2)
+                       {
+                               tmpPos[n++] = pp3;
+                       }
+                       //insert back all but pp2
+                       for (n--; n >= 0; n--)
+                       {
+                               pq.Insert(tmpPos[n]);
+                       }
+                       //insert pp back
+                       pq.Put(pp);
+                       return pp2;
+               }
+               
+               /// <summary> Init PhrasePositions in place.
+               /// There is a one-time initialization for this scorer:
+               /// <br/>- Put in repeats[] each pp that has another pp with the same position in the doc.
+               /// <br/>- Also mark each such pp by setting pp.repeats = true.
+               /// <br/>Later we can consult repeats[] in termPositionsDiffer(pp), making that check efficient.
+               /// In particular, this lets queries with no repetitions be scored with no overhead from this computation.
+               /// <br/>- Example 1 - query with no repetitions: "ho my"~2
+               /// <br/>- Example 2 - query with repetitions: "ho my my"~2
+               /// <br/>- Example 3 - query with repetitions: "my ho my"~2
+               /// <br/>Per-doc init when the query has repeats includes advancing some repeating pp's to avoid false phrase detection.
+               /// </summary>
+               /// <returns> end (max position), or -1 if any term ran out (i.e. done) 
+               /// </returns>
+               /// <throws>  IOException  </throws>
+               private int InitPhrasePositions()
+               {
+                       int end = 0;
+                       
+                       // no repeats at all (most common case is also the simplest one)
+                       if (checkedRepeats && repeats == null)
+                       {
+                               // build queue from list
+                               pq.Clear();
+                               for (PhrasePositions pp = first; pp != null; pp = pp.next)
+                               {
+                                       pp.FirstPosition();
+                                       if (pp.position > end)
+                                               end = pp.position;
+                                       pq.Put(pp); // build pq from list
+                               }
+                               return end;
+                       }
+                       
+                       // position the pp's
+                       for (PhrasePositions pp = first; pp != null; pp = pp.next)
+                               pp.FirstPosition();
+                       
+                       // one-time initialization for this scorer
+                       if (!checkedRepeats)
+                       {
+                               checkedRepeats = true;
+                               // check for repeats
+                               System.Collections.Hashtable m = null;
+                               for (PhrasePositions pp = first; pp != null; pp = pp.next)
+                               {
+                                       int tpPos = pp.position + pp.offset;
+                                       for (PhrasePositions pp2 = pp.next; pp2 != null; pp2 = pp2.next)
+                                       {
+                                               int tpPos2 = pp2.position + pp2.offset;
+                                               if (tpPos2 == tpPos)
+                                               {
+                                                       if (m == null)
+                                                       {
+                                                               m = new System.Collections.Hashtable();
+                                                       }
+                                                       pp.repeats = true;
+                                                       pp2.repeats = true;
+                                                       m[pp] = null;
+                                                       m[pp2] = null;
+                                               }
+                                       }
+                               }
+                               if (m != null)
+                               {
+                                       repeats = (PhrasePositions[])(new System.Collections.ArrayList(m.Keys).ToArray(typeof(PhrasePositions)));
+                               }
+                       }
+                       
+                       // with repeats, we must advance some repeating pp's so they all start with differing tp's
+                       if (repeats != null)
+                       {
+                               for (int i = 0; i < repeats.Length; i++)
+                               {
+                                       PhrasePositions pp = repeats[i];
+                                       PhrasePositions pp2;
+                                       while ((pp2 = TermPositionsDiffer(pp)) != null)
+                                       {
+                                               // out of the pps that do not differ, advance the one with the higher offset
+                                               if (!pp2.NextPosition())
+                                                       return -1; // ran out of a term -- done
+                                       }
+                               }
+                       }
+                       
+                       // build queue from list
+                       pq.Clear();
+                       for (PhrasePositions pp = first; pp != null; pp = pp.next)
+                       {
+                               if (pp.position > end)
+                                       end = pp.position;
+                               pq.Put(pp); // build pq from list
+                       }
+                       
+                       if (repeats != null)
+                       {
+                               tmpPos = new PhrasePositions[pq.Size()];
+                       }
+                       return end;
+               }
+               
+               /// <summary> We disallow two pp's from having the same TermPosition, thereby verifying that multiple
+               /// occurrences of the same word in the query map to different positions in the matched doc.
+               /// </summary>
+               /// <returns> null if the positions differ (i.e. valid); otherwise the higher-offset PhrasePositions
+               /// of the first two PPs found not to differ.
+               /// </returns>
+               private PhrasePositions TermPositionsDiffer(PhrasePositions pp)
+               {
+                       // efficiency note: a more efficient implementation could keep a map between repeating 
+                       // pp's, so that if pp1a, pp1b, pp1c are repeats term1, and pp2a, pp2b are repeats 
+                       // of term2, pp2a would only be checked against pp2b but not against pp1a, pp1b, pp1c. 
+                       // However this would complicate code, for a rather rare case, so choice is to compromise here.
+                       int tpPos = pp.position + pp.offset;
+                       for (int i = 0; i < repeats.Length; i++)
+                       {
+                               PhrasePositions pp2 = repeats[i];
+                               if (pp2 == pp)
+                                       continue;
+                               int tpPos2 = pp2.position + pp2.offset;
+                               if (tpPos2 == tpPos)
+                               return pp.offset > pp2.offset ? pp : pp2; // do not differ: return the one with the higher offset.
+                       }
+                       return null;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Sort.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Sort.cs
new file mode 100644 (file)
index 0000000..f3761da
--- /dev/null
@@ -0,0 +1,310 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       
+       /// <summary> Encapsulates sort criteria for returned hits.
+       /// 
+       /// <p/>The fields used to determine sort order must be carefully chosen.
+       /// Documents must contain a single term in such a field,
+       /// and the value of the term should indicate the document's relative position in
+       /// a given sort order.  The field must be indexed, but should not be tokenized,
+       /// and does not need to be stored (unless you happen to want it back with the
+       /// rest of your document data).  In other words:
+       /// 
+       /// <p/><code>document.add (new Field ("byNumber", Integer.toString(x), Field.Store.NO, Field.Index.NOT_ANALYZED));</code><p/>
+       /// 
+       /// 
+       /// <p/><h3>Valid Types of Values</h3>
+       /// 
+       /// <p/>There are four possible kinds of term values which may be put into
+       /// sorting fields: Integers, Longs, Floats, or Strings.  Unless
+       /// {@link SortField SortField} objects are specified, the type of value
+       /// in the field is determined by parsing the first term in the field.
+       /// 
+       /// <p/>Integer term values should contain only digits and an optional
+       /// preceding negative sign.  Values must be base 10 and in the range
+       /// <code>Integer.MIN_VALUE</code> to <code>Integer.MAX_VALUE</code> inclusive.
+       /// Documents which should appear first in the sort
+       /// should have low value integers, later documents high values
+       /// (i.e. the documents should be numbered <code>1..n</code> where
+       /// <code>1</code> is the first and <code>n</code> the last).
+       /// 
+       /// <p/>Long term values should contain only digits and an optional
+       /// preceding negative sign.  Values must be base 10 and in the range
+       /// <code>Long.MIN_VALUE</code> to <code>Long.MAX_VALUE</code> inclusive.
+       /// Documents which should appear first in the sort
+       /// should have low value integers, later documents high values.
+       /// 
+       /// <p/>Float term values should conform to values accepted by
+       /// {@link Float Float.valueOf(String)} (except that <code>NaN</code>
+       /// and <code>Infinity</code> are not supported).
+       /// Documents which should appear first in the sort
+       /// should have low values, later documents high values.
+       /// 
+	/// <p/>String term values can contain any valid String, but should
+	/// not be tokenized.  The values are sorted according to their
+	/// {@link Comparable natural order}.  Note that using this type
+	/// of term value has higher memory requirements than the other
+	/// types.
+       /// 
+       /// <p/><h3>Object Reuse</h3>
+       /// 
+       /// <p/>One of these objects can be
+       /// used multiple times and the sort order changed between usages.
+       /// 
+       /// <p/>This class is thread safe.
+       /// 
+       /// <p/><h3>Memory Usage</h3>
+       /// 
+	/// <p/>Sorting uses caches of term values maintained by the
+       /// internal HitQueue(s).  The cache is static and contains an integer
+       /// or float array of length <code>IndexReader.maxDoc()</code> for each field
+       /// name for which a sort is performed.  In other words, the size of the
+       /// cache in bytes is:
+       /// 
+       /// <p/><code>4 * IndexReader.maxDoc() * (# of different fields actually used to sort)</code>
+       /// 
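+	/// <p/>For example, sorting an index of 10,000,000 documents on two int fields
+	/// caches roughly <code>4 * 10,000,000 * 2</code> bytes, i.e. about 80 MB
+	/// (a worked figure for illustration, not an exact measurement).
+	/// 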
+       /// <p/>For String fields, the cache is larger: in addition to the
+       /// above array, the value of every term in the field is kept in memory.
+       /// If there are many unique terms in the field, this could
+       /// be quite large.
+       /// 
+       /// <p/>Note that the size of the cache is not affected by how many
+       /// fields are in the index and <i>might</i> be used to sort - only by
+       /// the ones actually used to sort a result set.
+       /// 
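+	/// <p/><h3>Usage</h3>
+	/// 
+	/// <p/>A minimal sketch (assuming a <code>Searcher</code> named <code>searcher</code>
+	/// and a <code>Query</code> named <code>query</code>; the field names are illustrative):
+	/// 
+	/// <p/><code>
+	/// Sort sort = new Sort(new SortField("byNumber", SortField.INT));
+	/// TopDocs hits = searcher.Search(query, null, 100, sort);
+	/// </code>
+	/// 
+	/// <p/>To sort by several criteria in succession, pass an array:
+	/// <code>new Sort(new SortField[] { new SortField("category", SortField.STRING), SortField.FIELD_SCORE })</code>.
+	/// 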
+       /// <p/>Created: Feb 12, 2004 10:53:57 AM
+       /// 
+       /// </summary>
+       /// <since>   lucene 1.4
+       /// </since>
+       /// <version>  $Id: Sort.java 795179 2009-07-17 18:23:30Z mikemccand $
+       /// </version>
+       [Serializable]
+       public class Sort
+       {
+               
+		/// <summary> Represents sorting by computed relevance. Using this sort criterion returns
+		/// the same results as calling
+		/// {@link Searcher#Search(Query) Searcher#search()} without sort criteria,
+               /// only with slightly more overhead.
+               /// </summary>
+               public static readonly Sort RELEVANCE = new Sort();
+               
+               /// <summary>Represents sorting by index order. </summary>
+               public static readonly Sort INDEXORDER;
+               
+               // internal representation of the sort criteria
+               internal SortField[] fields;
+               
+		/// <summary> Sorts by computed relevance. This is the same sort criterion as calling
+		/// {@link Searcher#Search(Query) Searcher#search()} without sort criteria,
+               /// only with slightly more overhead.
+               /// </summary>
+               public Sort():this(SortField.FIELD_SCORE)
+               {
+               }
+               
+               /// <summary> Sorts by the terms in <code>field</code> then by index order (document
+               /// number). The type of value in <code>field</code> is determined
+               /// automatically.
+               /// 
+               /// </summary>
+               /// <seealso cref="SortField.AUTO">
+               /// </seealso>
+               /// <deprecated> Please specify the type explicitly by
+               /// first creating a {@link SortField} and then use {@link
+               /// #Sort(SortField)}
+               /// </deprecated>
+        [Obsolete("Please specify the type explicitly by first creating a SortField and then use Sort(SortField)")]
+               public Sort(System.String field)
+               {
+                       SetSort(field, false);
+               }
+               
+               /// <summary> Sorts possibly in reverse by the terms in <code>field</code> then by
+               /// index order (document number). The type of value in <code>field</code> is
+               /// determined automatically.
+               /// 
+               /// </summary>
+               /// <seealso cref="SortField.AUTO">
+               /// </seealso>
+               /// <deprecated> Please specify the type explicitly by
+               /// first creating a {@link SortField} and then use {@link
+               /// #Sort(SortField)}
+               /// </deprecated>
+        [Obsolete("Please specify the type explicitly by first creating a SortField and then use Sort(SortField)")]
+               public Sort(System.String field, bool reverse)
+               {
+                       SetSort(field, reverse);
+               }
+               
+               /// <summary> Sorts in succession by the terms in each field. The type of value in
+               /// <code>field</code> is determined automatically.
+               /// 
+               /// </summary>
+               /// <seealso cref="SortField.AUTO">
+               /// </seealso>
+               /// <deprecated> Please specify the type explicitly by
+               /// first creating {@link SortField}s and then use {@link
+               /// #Sort(SortField[])}
+               /// </deprecated>
+        [Obsolete("Please specify the type explicitly by first creating SortFields and then use Sort(SortField[])")]
+               public Sort(System.String[] fields)
+               {
+                       SetSort(fields);
+               }
+               
+               /// <summary>Sorts by the criteria in the given SortField. </summary>
+               public Sort(SortField field)
+               {
+                       SetSort(field);
+               }
+               
+               /// <summary>Sorts in succession by the criteria in each SortField. </summary>
+               public Sort(SortField[] fields)
+               {
+                       SetSort(fields);
+               }
+               
+               /// <summary> Sets the sort to the terms in <code>field</code> then by index order
+               /// (document number).
+               /// </summary>
+               /// <deprecated> Please specify the type explicitly by
+               /// first creating a {@link SortField} and then use {@link
+               /// #SetSort(SortField)}
+               /// </deprecated>
+        [Obsolete("Please specify the type explicitly by first creating a SortField and then use SetSort(SortField)")]
+		public void SetSort(System.String field)
+               {
+                       SetSort(field, false);
+               }
+               
+               /// <summary> Sets the sort to the terms in <code>field</code> possibly in reverse,
+               /// then by index order (document number).
+               /// </summary>
+               /// <deprecated> Please specify the type explicitly by
+               /// first creating a {@link SortField} and then use {@link
+               /// #SetSort(SortField)}
+               /// </deprecated>
+        [Obsolete("Please specify the type explicitly by first creating a SortField and then use SetSort(SortField)")]
+		public virtual void SetSort(System.String field, bool reverse)
+               {
+                       fields = new SortField[]{new SortField(field, SortField.AUTO, reverse)};
+               }
+               
+               /// <summary>Sets the sort to the terms in each field in succession.</summary>
+               /// <deprecated> Please specify the type explicitly by
+               /// first creating {@link SortField}s and then use {@link
+               /// #SetSort(SortField[])} 
+               /// </deprecated>
+        [Obsolete("Please specify the type explicitly by first creating SortFields and then use SetSort(SortField[])")]
+		public virtual void SetSort(System.String[] fieldnames)
+               {
+                       int n = fieldnames.Length;
+                       SortField[] nfields = new SortField[n];
+                       for (int i = 0; i < n; ++i)
+                       {
+                               nfields[i] = new SortField(fieldnames[i], SortField.AUTO);
+                       }
+                       fields = nfields;
+               }
+               
+               /// <summary>Sets the sort to the given criteria. </summary>
+		public virtual void SetSort(SortField field)
+               {
+                       this.fields = new SortField[]{field};
+               }
+               
+               /// <summary>Sets the sort to the given criteria in succession. </summary>
+		public virtual void SetSort(SortField[] fields)
+               {
+                       this.fields = fields;
+               }
+               
+               /// <summary> Representation of the sort criteria.</summary>
+               /// <returns> Array of SortField objects used in this sort criteria
+               /// </returns>
+               public virtual SortField[] GetSort()
+               {
+                       return fields;
+               }
+               
+               public override System.String ToString()
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       
+                       for (int i = 0; i < fields.Length; i++)
+                       {
+                               buffer.Append(fields[i].ToString());
+                               if ((i + 1) < fields.Length)
+                                       buffer.Append(',');
+                       }
+                       
+                       return buffer.ToString();
+               }
+               
+               /// <summary>Returns true if <code>o</code> is equal to this. </summary>
+		public override bool Equals(System.Object o)
+               {
+                       if (this == o)
+                               return true;
+                       if (!(o is Sort))
+                               return false;
+                       Sort other = (Sort) o;
+
+            bool result = false;
+            if ((this.fields == null) && (other.fields == null))
+                result = true;
+            else if ((this.fields != null) && (other.fields != null))
+            {
+                if (this.fields.Length == other.fields.Length)
+                {
+                    int length = this.fields.Length;
+                    result = true;
+                    for (int i = 0; i < length; i++)
+                    {
+                        if (!(this.fields[i].Equals(other.fields[i])))
+                        {
+                            result = false;
+                            break;
+                        }
+                    }
+                }
+            }
+            return result;
+               }
+               
+               /// <summary>Returns a hash code value for this object. </summary>
+               public override int GetHashCode()
+               {
+                       // TODO in Java 1.5: switch to Arrays.hashCode().  The 
+                       // Java 1.4 workaround below calculates the same hashCode
+                       // as Java 1.5's new Arrays.hashCode()
+                       return 0x45aaf665 + SupportClass.EquatableList<SortField>.GetHashCode(fields);
+               }
+               static Sort()
+               {
+                       INDEXORDER = new Sort(SortField.FIELD_DOC);
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SortComparator.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SortComparator.cs
new file mode 100644 (file)
index 0000000..9e507ab
--- /dev/null
@@ -0,0 +1,123 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Abstract base class for sorting hits returned by a Query.
+       /// 
+       /// <p/>
+	/// This class should only be used if the other SortField types (SCORE, DOC,
+	/// STRING, INT, FLOAT) do not provide adequate sorting. It maintains an
+       /// internal cache of values which could be quite large. The cache is an array of
+       /// Comparable, one for each document in the index. There is a distinct
+       /// Comparable for each unique term in the field - if some documents have the
+       /// same term in the field, the cache array will have entries which reference the
+       /// same Comparable.
+       /// 
+	/// This class will be used as part of a key to a FieldCache value. You must
+	/// implement GetHashCode and Equals to avoid an explosion in RAM usage if you use
+	/// instances that are not the same instance. If you are searching using the
+	/// Remote contrib, the same instance of this class on the client will be a new
+	/// instance on every call to the server, so GetHashCode/Equals is very important in
+	/// that situation.
+       /// 
+       /// <p/>
+       /// Created: Apr 21, 2004 5:08:38 PM
+       /// 
+       /// 
+       /// </summary>
+       /// <version>  $Id: SortComparator.java 800119 2009-08-02 17:59:21Z markrmiller $
+       /// </version>
+       /// <since> 1.4
+       /// </since>
+       /// <deprecated> Please use {@link FieldComparatorSource} instead.
+       /// </deprecated>
+    [Obsolete("Please use FieldComparatorSource instead.")]
+       [Serializable]
+       public abstract class SortComparator : SortComparatorSource
+       {
+               private class AnonymousClassScoreDocComparator : ScoreDocComparator
+               {
+                       public AnonymousClassScoreDocComparator(System.IComparable[] cachedValues, SortComparator enclosingInstance)
+                       {
+                               InitBlock(cachedValues, enclosingInstance);
+                       }
+                       private void  InitBlock(System.IComparable[] cachedValues, SortComparator enclosingInstance)
+                       {
+                               this.cachedValues = cachedValues;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private System.IComparable[] cachedValues;
+                       private SortComparator enclosingInstance;
+                       public SortComparator Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       
+                       public virtual int Compare(ScoreDoc i, ScoreDoc j)
+                       {
+                               return cachedValues[i.doc].CompareTo(cachedValues[j.doc]);
+                       }
+                       
+                       public virtual System.IComparable SortValue(ScoreDoc i)
+                       {
+                               return cachedValues[i.doc];
+                       }
+                       
+                       public virtual int SortType()
+                       {
+                               return SortField.CUSTOM;
+                       }
+               }
+               
+               // inherit javadocs
+               public virtual ScoreDocComparator NewComparator(IndexReader reader, System.String fieldname)
+               {
+                       System.String field = String.Intern(fieldname);
+                       System.IComparable[] cachedValues = Mono.Lucene.Net.Search.FieldCache_Fields.DEFAULT.GetCustom(reader, field, this);
+                       
+                       return new AnonymousClassScoreDocComparator(cachedValues, this);
+               }
+               
+               /// <summary> Returns an object which, when sorted according to natural order,
+               /// will order the Term values in the correct order.
+               /// <p/>For example, if the Terms contained integer values, this method
+		/// would return the parsed integer value of <code>termtext</code>.  Note that this
+               /// might not always be the most efficient implementation - for this
+               /// particular example, a better implementation might be to make a
+               /// ScoreDocLookupComparator that uses an internal lookup table of int.
+               /// </summary>
+               /// <param name="termtext">The textual value of the term.
+               /// </param>
+               /// <returns> An object representing <code>termtext</code> that sorts according to the natural order of <code>termtext</code>.
+               /// </returns>
+               /// <seealso cref="Comparable">
+               /// </seealso>
+               /// <seealso cref="ScoreDocComparator">
+               /// </seealso>
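+		/// <example>A hypothetical implementation for integer-valued terms
+		/// (error handling omitted):
+		/// <code>
+		/// public override System.IComparable GetComparable(System.String termtext)
+		/// {
+		///     return System.Int32.Parse(termtext);
+		/// }
+		/// </code>
+		/// </example>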
+               public /*protected internal*/ abstract System.IComparable GetComparable(System.String termtext);
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SortComparatorSource.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SortComparatorSource.cs
new file mode 100644 (file)
index 0000000..1b35bcf
--- /dev/null
@@ -0,0 +1,59 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Expert: returns a comparator for sorting ScoreDocs.
+       /// 
+       /// <p/>
+       /// Created: Apr 21, 2004 3:49:28 PM
+       /// 
+	/// This class will be used as part of a key to a FieldCache value. You must
+	/// implement GetHashCode and Equals to avoid an explosion in RAM usage if you use
+	/// instances that are not the same instance. If you are searching using the
+	/// Remote contrib, the same instance of this class on the client will be a new
+	/// instance on every call to the server, so GetHashCode/Equals is very important in
+	/// that situation.
+       /// 
+       /// </summary>
+	/// <version>  $Id: SortComparatorSource.java 747019 2009-02-23 13:59:50Z mikemccand $
+       /// </version>
+       /// <since> 1.4
+       /// </since>
+       /// <deprecated> Please use {@link FieldComparatorSource} instead.
+       /// </deprecated>
+    [Obsolete("Please use FieldComparatorSource instead.")]
+       public interface SortComparatorSource
+       {
+               
+               /// <summary> Creates a comparator for the field in the given index.</summary>
+               /// <param name="reader">Index to create comparator for.
+               /// </param>
+               /// <param name="fieldname"> Name of the field to create comparator for.
+               /// </param>
+               /// <returns> Comparator of ScoreDoc objects.
+               /// </returns>
+               /// <throws>  IOException If an error occurs reading the index. </throws>
+               ScoreDocComparator NewComparator(IndexReader reader, System.String fieldname);
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SortField.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SortField.cs
new file mode 100644 (file)
index 0000000..3706378
--- /dev/null
@@ -0,0 +1,689 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NumericField = Mono.Lucene.Net.Documents.NumericField;
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using TermEnum = Mono.Lucene.Net.Index.TermEnum;
+using StringHelper = Mono.Lucene.Net.Util.StringHelper;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Stores information about how to sort documents by terms in an individual
+       /// field.  Fields must be indexed in order to sort by them.
+       /// 
+       /// <p/>Created: Feb 11, 2004 1:25:29 PM
+       /// 
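+	/// <p/>For example (the field name "price" is illustrative),
+	/// <code>new SortField("price", SortField.FLOAT, true)</code> sorts by the
+	/// "price" field as floats, with the highest values first.
+	/// 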
+       /// </summary>
+       /// <since>   lucene 1.4
+       /// </since>
+       /// <version>  $Id: SortField.java 801344 2009-08-05 18:05:06Z yonik $
+       /// </version>
+       /// <seealso cref="Sort">
+       /// </seealso>
+       [Serializable]
+       public class SortField
+       {
+               
+               /// <summary>Sort by document score (relevancy).  Sort values are Float and higher
+               /// values are at the front. 
+               /// </summary>
+               public const int SCORE = 0;
+               
+               /// <summary>Sort by document number (index order).  Sort values are Integer and lower
+               /// values are at the front. 
+               /// </summary>
+               public const int DOC = 1;
+               
+		/// <summary>Guess type of sort based on field contents.  The first term indexed for
+		/// the field is examined to determine whether it represents an integer number, a
+		/// floating point number, or just arbitrary string characters.
+               /// </summary>
+               /// <deprecated> Please specify the exact type, instead.
+               /// Especially, guessing does <b>not</b> work with the new
+               /// {@link NumericField} type.
+               /// </deprecated>
+        [Obsolete("Please specify the exact type, instead. Especially, guessing does not work with the new NumericField type.")]
+               public const int AUTO = 2;
+               
+               /// <summary>Sort using term values as Strings.  Sort values are String and lower
+               /// values are at the front. 
+               /// </summary>
+               public const int STRING = 3;
+               
+               /// <summary>Sort using term values as encoded Integers.  Sort values are Integer and
+               /// lower values are at the front. 
+               /// </summary>
+               public const int INT = 4;
+               
+               /// <summary>Sort using term values as encoded Floats.  Sort values are Float and
+               /// lower values are at the front. 
+               /// </summary>
+               public const int FLOAT = 5;
+               
+               /// <summary>Sort using term values as encoded Longs.  Sort values are Long and
+               /// lower values are at the front. 
+               /// </summary>
+               public const int LONG = 6;
+               
+               /// <summary>Sort using term values as encoded Doubles.  Sort values are Double and
+               /// lower values are at the front. 
+               /// </summary>
+               public const int DOUBLE = 7;
+               
+               /// <summary>Sort using term values as encoded Shorts.  Sort values are Short and
+               /// lower values are at the front. 
+               /// </summary>
+               public const int SHORT = 8;
+               
+               /// <summary>Sort using a custom Comparator.  Sort values are any Comparable and
+               /// sorting is done according to natural order. 
+               /// </summary>
+               public const int CUSTOM = 9;
+               
+               /// <summary>Sort using term values as encoded Bytes.  Sort values are Byte and
+               /// lower values are at the front. 
+               /// </summary>
+               public const int BYTE = 10;
+               
+               /// <summary>Sort using term values as Strings, but comparing by
+               /// value (using String.compareTo) for all comparisons.
+               /// This is typically slower than {@link #STRING}, which
+               /// uses ordinals to do the sorting. 
+               /// </summary>
+               public const int STRING_VAL = 11;
+               
+               // IMPLEMENTATION NOTE: the FieldCache.STRING_INDEX is in the same "namespace"
+               // as the above static int values.  Any new values must not have the same value
+               // as FieldCache.STRING_INDEX.
+               
+               /// <summary>Represents sorting by document score (relevancy). </summary>
+               public static readonly SortField FIELD_SCORE = new SortField(null, SCORE);
+               
+               /// <summary>Represents sorting by document number (index order). </summary>
+               public static readonly SortField FIELD_DOC = new SortField(null, DOC);
+               
+               private System.String field;
+               private int type = AUTO; // defaults to determining type dynamically
+               private System.Globalization.CultureInfo locale; // defaults to "natural order" (no Locale)
+               internal bool reverse = false; // defaults to natural order
+               private SortComparatorSource factory;
+               private Mono.Lucene.Net.Search.Parser parser;
+               
+               // Used for CUSTOM sort
+               private FieldComparatorSource comparatorSource;
+               
+               private bool useLegacy = false; // remove in Lucene 3.0
+               
+               /// <summary>Creates a sort by terms in the given field where the type of term value
+               /// is determined dynamically ({@link #AUTO AUTO}).
+               /// </summary>
+               /// <param name="field">Name of field to sort by, cannot be
+               /// <code>null</code>.
+               /// </param>
+               /// <deprecated> Please specify the exact type instead.
+               /// </deprecated>
+        [Obsolete("Please specify the exact type instead.")]
+               public SortField(System.String field)
+               {
+                       InitFieldType(field, AUTO);
+               }
+               
+               /// <summary>Creates a sort, possibly in reverse, by terms in the given field where
+               /// the type of term value is determined dynamically ({@link #AUTO AUTO}).
+               /// </summary>
+               /// <param name="field">Name of field to sort by, cannot be <code>null</code>.
+               /// </param>
+               /// <param name="reverse">True if natural order should be reversed.
+               /// </param>
+               /// <deprecated> Please specify the exact type instead.
+               /// </deprecated>
+        [Obsolete("Please specify the exact type instead.")]
+               public SortField(System.String field, bool reverse)
+               {
+                       InitFieldType(field, AUTO);
+                       this.reverse = reverse;
+               }
+               
+               /// <summary>Creates a sort by terms in the given field with the type of term
+               /// values explicitly given.
+               /// </summary>
+               /// <param name="field"> Name of field to sort by.  Can be <code>null</code> if
+               /// <code>type</code> is SCORE or DOC.
+               /// </param>
+               /// <param name="type">  Type of values in the terms.
+               /// </param>
+               public SortField(System.String field, int type)
+               {
+                       InitFieldType(field, type);
+               }
+               
+               /// <summary>Creates a sort, possibly in reverse, by terms in the given field with the
+               /// type of term values explicitly given.
+               /// </summary>
+               /// <param name="field"> Name of field to sort by.  Can be <code>null</code> if
+               /// <code>type</code> is SCORE or DOC.
+               /// </param>
+               /// <param name="type">  Type of values in the terms.
+               /// </param>
+               /// <param name="reverse">True if natural order should be reversed.
+               /// </param>
+               public SortField(System.String field, int type, bool reverse)
+               {
+                       InitFieldType(field, type);
+                       this.reverse = reverse;
+               }
+               
+               /// <summary>Creates a sort by terms in the given field, parsed
+               /// to numeric values using a custom {@link FieldCache.Parser}.
+               /// </summary>
+               /// <param name="field"> Name of field to sort by.  Must not be null.
+               /// </param>
+               /// <param name="parser">Instance of a {@link FieldCache.Parser},
+               /// which must subclass one of the existing numeric
+               /// parsers from {@link FieldCache}. Sort type is inferred
+               /// by testing which numeric parser the parser subclasses.
+               /// </param>
+		/// <throws>  System.ArgumentException if the parser fails to
+		/// subclass an existing numeric parser, or field is null </throws>
+               public SortField(System.String field, Mono.Lucene.Net.Search.Parser parser):this(field, parser, false)
+               {
+               }
+               
+               /// <summary>Creates a sort, possibly in reverse, by terms in the given field, parsed
+               /// to numeric values using a custom {@link FieldCache.Parser}.
+               /// </summary>
+               /// <param name="field"> Name of field to sort by.  Must not be null.
+               /// </param>
+               /// <param name="parser">Instance of a {@link FieldCache.Parser},
+               /// which must subclass one of the existing numeric
+               /// parsers from {@link FieldCache}. Sort type is inferred
+               /// by testing which numeric parser the parser subclasses.
+               /// </param>
+               /// <param name="reverse">True if natural order should be reversed.
+               /// </param>
+		/// <throws>  System.ArgumentException if the parser fails to
+		/// subclass an existing numeric parser, or field is null </throws>
+               public SortField(System.String field, Mono.Lucene.Net.Search.Parser parser, bool reverse)
+               {
+                       if (parser is Mono.Lucene.Net.Search.IntParser)
+                               InitFieldType(field, INT);
+                       else if (parser is Mono.Lucene.Net.Search.FloatParser)
+                               InitFieldType(field, FLOAT);
+                       else if (parser is Mono.Lucene.Net.Search.ShortParser)
+                               InitFieldType(field, SHORT);
+                       else if (parser is Mono.Lucene.Net.Search.ByteParser)
+                               InitFieldType(field, BYTE);
+                       else if (parser is Mono.Lucene.Net.Search.LongParser)
+                               InitFieldType(field, LONG);
+                       else if (parser is Mono.Lucene.Net.Search.DoubleParser)
+                               InitFieldType(field, DOUBLE);
+                       else
+                       {
+                               throw new System.ArgumentException("Parser instance does not subclass existing numeric parser from FieldCache (got " + parser + ")");
+                       }
+                       
+                       this.reverse = reverse;
+                       this.parser = parser;
+               }
+               
+               /// <summary>Creates a sort by terms in the given field sorted
+               /// according to the given locale.
+               /// </summary>
+               /// <param name="field"> Name of field to sort by, cannot be <code>null</code>.
+               /// </param>
+               /// <param name="locale">Locale of values in the field.
+               /// </param>
+               public SortField(System.String field, System.Globalization.CultureInfo locale)
+               {
+                       InitFieldType(field, STRING);
+                       this.locale = locale;
+               }
+               
+               /// <summary>Creates a sort, possibly in reverse, by terms in the given field sorted
+               /// according to the given locale.
+               /// </summary>
+               /// <param name="field"> Name of field to sort by, cannot be <code>null</code>.
+               /// </param>
+               /// <param name="locale">Locale of values in the field.
+               /// </param>
+               public SortField(System.String field, System.Globalization.CultureInfo locale, bool reverse)
+               {
+                       InitFieldType(field, STRING);
+                       this.locale = locale;
+                       this.reverse = reverse;
+               }
+               
+               /// <summary>Creates a sort with a custom comparison function.</summary>
+               /// <param name="field">Name of field to sort by; cannot be <code>null</code>.
+               /// </param>
+               /// <param name="comparator">Returns a comparator for sorting hits.
+               /// </param>
+               /// <deprecated> use SortField (String field, FieldComparatorSource comparator)
+               /// </deprecated>
+        [Obsolete("use SortField (String field, FieldComparatorSource comparator)")]
+               public SortField(System.String field, SortComparatorSource comparator)
+               {
+                       InitFieldType(field, CUSTOM);
+                       SetUseLegacySearch(true);
+                       this.factory = comparator;
+               }
+               
+               /// <summary>Creates a sort with a custom comparison function.</summary>
+               /// <param name="field">Name of field to sort by; cannot be <code>null</code>.
+               /// </param>
+               /// <param name="comparator">Returns a comparator for sorting hits.
+               /// </param>
+               public SortField(System.String field, FieldComparatorSource comparator)
+               {
+                       InitFieldType(field, CUSTOM);
+                       this.comparatorSource = comparator;
+               }
+               
+               /// <summary>Creates a sort, possibly in reverse, with a custom comparison function.</summary>
+               /// <param name="field">Name of field to sort by; cannot be <code>null</code>.
+               /// </param>
+               /// <param name="comparator">Returns a comparator for sorting hits.
+               /// </param>
+               /// <param name="reverse">True if natural order should be reversed.
+               /// </param>
+               /// <deprecated> use SortField (String field, FieldComparatorSource comparator, boolean reverse)
+               /// </deprecated>
+        [Obsolete("use SortField(String field, FieldComparatorSource comparator, boolean reverse)")]
+               public SortField(System.String field, SortComparatorSource comparator, bool reverse)
+               {
+                       InitFieldType(field, CUSTOM);
+                       SetUseLegacySearch(true);
+                       this.reverse = reverse;
+                       this.factory = comparator;
+               }
+               
+               /// <summary>Creates a sort, possibly in reverse, with a custom comparison function.</summary>
+               /// <param name="field">Name of field to sort by; cannot be <code>null</code>.
+               /// </param>
+               /// <param name="comparator">Returns a comparator for sorting hits.
+               /// </param>
+               /// <param name="reverse">True if natural order should be reversed.
+               /// </param>
+               public SortField(System.String field, FieldComparatorSource comparator, bool reverse)
+               {
+                       InitFieldType(field, CUSTOM);
+                       this.reverse = reverse;
+                       this.comparatorSource = comparator;
+               }
+               
+               // Sets field & type, and ensures field is not NULL unless
+               // type is SCORE or DOC
+		private void InitFieldType(System.String field, int type)
+               {
+                       this.type = type;
+                       if (field == null)
+                       {
+                               if (type != SCORE && type != DOC)
+                                       throw new System.ArgumentException("field can only be null when type is SCORE or DOC");
+                       }
+                       else
+                       {
+                               this.field = StringHelper.Intern(field);
+                       }
+               }
+               
+               /// <summary>Returns the name of the field.  Could return <code>null</code>
+               /// if the sort is by SCORE or DOC.
+               /// </summary>
+               /// <returns> Name of field, possibly <code>null</code>.
+               /// </returns>
+               public virtual System.String GetField()
+               {
+                       return field;
+               }
+               
+               /// <summary>Returns the type of contents in the field.</summary>
+		/// <returns> One of the sort type constants defined in this class: SCORE, DOC,
+		/// AUTO, STRING, STRING_VAL, INT, FLOAT, LONG, DOUBLE, SHORT, BYTE or CUSTOM.
+		/// </returns>
+               public new virtual int GetType()
+               {
+                       return type;
+               }
+               
+               /// <summary>Returns the Locale by which term values are interpreted.
+               /// May return <code>null</code> if no Locale was specified.
+               /// </summary>
+               /// <returns> Locale, or <code>null</code>.
+               /// </returns>
+               public virtual System.Globalization.CultureInfo GetLocale()
+               {
+                       return locale;
+               }
+               
+               /// <summary>Returns the instance of a {@link FieldCache} parser that fits to the given sort type.
+		/// May return <code>null</code> if no parser was specified; in that case, sorting uses the default parser.
+               /// </summary>
+               /// <returns> An instance of a {@link FieldCache} parser, or <code>null</code>.
+               /// </returns>
+               public virtual Mono.Lucene.Net.Search.Parser GetParser()
+               {
+                       return parser;
+               }
+               
+               /// <summary>Returns whether the sort should be reversed.</summary>
+               /// <returns>  True if natural order should be reversed.
+               /// </returns>
+               public virtual bool GetReverse()
+               {
+                       return reverse;
+               }
+               
+               /// <deprecated> use {@link #GetComparatorSource()}
+               /// </deprecated>
+        [Obsolete("use GetComparatorSource()")]
+               public virtual SortComparatorSource GetFactory()
+               {
+                       return factory;
+               }
+               
+               public virtual FieldComparatorSource GetComparatorSource()
+               {
+                       return comparatorSource;
+               }
+               
+		/// <summary> Use the legacy IndexSearcher implementation: search with a DirectoryReader rather
+		/// than passing a single hit collector to multiple SegmentReaders.
+               /// 
+               /// </summary>
+               /// <param name="legacy">true for legacy behavior
+               /// </param>
+               /// <deprecated> will be removed in Lucene 3.0.
+               /// </deprecated>
+        [Obsolete("will be removed in Lucene 3.0.")]
+		public virtual void SetUseLegacySearch(bool legacy)
+               {
+                       this.useLegacy = legacy;
+               }
+               
+		/// <returns> true if IndexSearcher will use the legacy sorting search implementation,
+		/// e.g. multiple priority queues.
+               /// </returns>
+               /// <deprecated> will be removed in Lucene 3.0.
+               /// </deprecated>
+        [Obsolete("will be removed in Lucene 3.0.")]
+               public virtual bool GetUseLegacySearch()
+               {
+                       return this.useLegacy;
+               }
+               
+               public override System.String ToString()
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       switch (type)
+                       {
+                               
+                               case SCORE: 
+                                       buffer.Append("<score>");
+                                       break;
+                               
+                               
+                               case DOC: 
+                                       buffer.Append("<doc>");
+                                       break;
+                               
+                               
+                               case AUTO: 
+                                       buffer.Append("<auto: \"").Append(field).Append("\">");
+                                       break;
+                               
+                               
+                               case STRING: 
+                                       buffer.Append("<string: \"").Append(field).Append("\">");
+                                       break;
+                               
+                               
+                               case STRING_VAL: 
+                                       buffer.Append("<string_val: \"").Append(field).Append("\">");
+                                       break;
+                               
+                               
+                               case BYTE: 
+                                       buffer.Append("<byte: \"").Append(field).Append("\">");
+                                       break;
+                               
+                               
+                               case SHORT: 
+                                       buffer.Append("<short: \"").Append(field).Append("\">");
+                                       break;
+                               
+                               
+                               case INT: 
+                                       buffer.Append("<int: \"").Append(field).Append("\">");
+                                       break;
+                               
+                               
+                               case LONG: 
+                                       buffer.Append("<long: \"").Append(field).Append("\">");
+                                       break;
+                               
+                               
+                               case FLOAT: 
+                                       buffer.Append("<float: \"").Append(field).Append("\">");
+                                       break;
+                               
+                               
+                               case DOUBLE: 
+                                       buffer.Append("<double: \"").Append(field).Append("\">");
+                                       break;
+                               
+                               
+                               case CUSTOM: 
+                                       buffer.Append("<custom:\"").Append(field).Append("\": ").Append(factory).Append('>');
+                                       break;
+                               
+                               
+                               default: 
+                                       buffer.Append("<???: \"").Append(field).Append("\">");
+                                       break;
+                               
+                       }
+                       
+                       if (locale != null)
+                               buffer.Append('(').Append(locale).Append(')');
+                       if (parser != null)
+                               buffer.Append('(').Append(parser).Append(')');
+                       if (reverse)
+                               buffer.Append('!');
+                       
+                       return buffer.ToString();
+               }
+               
+               /// <summary>Returns true if <code>o</code> is equal to this.  If a
+               /// {@link SortComparatorSource} (deprecated) or {@link
+               /// FieldCache.Parser} was provided, it must properly
+               /// implement equals (unless a singleton is always used). 
+               /// </summary>
+		public override bool Equals(System.Object o)
+               {
+                       if (this == o)
+                               return true;
+                       if (!(o is SortField))
+                               return false;
+                       SortField other = (SortField) o;
+			return ((System.Object) other.field == (System.Object) this.field
+				&& other.type == this.type
+				&& other.reverse == this.reverse
+				&& (other.locale == null ? this.locale == null : other.locale.Equals(this.locale))
+				&& (other.factory == null ? this.factory == null : other.factory.Equals(this.factory))
+				&& (other.comparatorSource == null ? this.comparatorSource == null : other.comparatorSource.Equals(this.comparatorSource))
+				&& (other.parser == null ? this.parser == null : other.parser.Equals(this.parser)));
+               }
+               
+		/// <summary>Returns a hash code value for this object.  If a
+		/// {@link SortComparatorSource} (deprecated) or {@link
+		/// FieldCache.Parser} was provided, it must properly
+		/// implement hashCode (unless a singleton is always
+		/// used). 
+               /// </summary>
+               public override int GetHashCode()
+               {
+                       int hash = type ^ 0x346565dd + (reverse ? Boolean.TrueString.GetHashCode() : Boolean.FalseString.GetHashCode()) ^ unchecked((int) 0xaf5998bb);
+                       if (field != null)
+                               hash += (field.GetHashCode() ^ unchecked((int) 0xff5685dd));
+                       if (locale != null)
+                       {
+                               hash += (locale.GetHashCode() ^ 0x08150815);
+                       }
+                       if (factory != null)
+                               hash += (factory.GetHashCode() ^ 0x34987555);
+                       if (comparatorSource != null)
+                               hash += comparatorSource.GetHashCode();
+                       if (parser != null)
+                               hash += (parser.GetHashCode() ^ 0x3aaf56ff);
+                       return hash;
+               }
+               
+        
+       //// field must be interned after reading from stream
+       // private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+       //  in.defaultReadObject();
+       //  if (field != null)
+       //    field = StringHelper.intern(field);
+       // }
+
+        [System.Runtime.Serialization.OnDeserialized]
+        internal void OnDeserialized(System.Runtime.Serialization.StreamingContext context)
+        {
+            field = StringHelper.Intern(field);
+        }
+               
+               /// <summary>Returns the {@link FieldComparator} to use for
+               /// sorting.
+               /// 
+               /// <b>NOTE:</b> This API is experimental and might change in
+               /// incompatible ways in the next release.
+               /// 
+               /// </summary>
+               /// <param name="numHits">number of top hits the queue will store
+               /// </param>
+               /// <param name="sortPos">position of this SortField within {@link
+               /// Sort}.  The comparator is primary if sortPos==0,
+               /// secondary if sortPos==1, etc.  Some comparators can
+               /// optimize themselves when they are the primary sort.
+               /// </param>
+               /// <returns> {@link FieldComparator} to use when sorting
+               /// </returns>
+               public virtual FieldComparator GetComparator(int numHits, int sortPos)
+               {
+                       
+                       if (locale != null)
+                       {
+                               // TODO: it'd be nice to allow FieldCache.getStringIndex
+                               // to optionally accept a Locale so sorting could then use
+                               // the faster StringComparator impls
+                               return new FieldComparator.StringComparatorLocale(numHits, field, locale);
+                       }
+                       
+                       switch (type)
+                       {
+                               
+                               case SortField.SCORE: 
+                                       return new FieldComparator.RelevanceComparator(numHits);
+                               
+                               
+                               case SortField.DOC: 
+                                       return new FieldComparator.DocComparator(numHits);
+                               
+                               
+                               case SortField.INT: 
+                                       return new FieldComparator.IntComparator(numHits, field, parser);
+                               
+                               
+                               case SortField.FLOAT: 
+                                       return new FieldComparator.FloatComparator(numHits, field, parser);
+                               
+                               
+                               case SortField.LONG: 
+                                       return new FieldComparator.LongComparator(numHits, field, parser);
+                               
+                               
+                               case SortField.DOUBLE: 
+                                       return new FieldComparator.DoubleComparator(numHits, field, parser);
+                               
+                               
+                               case SortField.BYTE: 
+                                       return new FieldComparator.ByteComparator(numHits, field, parser);
+                               
+                               
+                               case SortField.SHORT: 
+                                       return new FieldComparator.ShortComparator(numHits, field, parser);
+                               
+                               
+                               case SortField.CUSTOM: 
+                                       System.Diagnostics.Debug.Assert(factory == null && comparatorSource != null);
+                                       return comparatorSource.NewComparator(field, numHits, sortPos, reverse);
+                               
+                               
+                               case SortField.STRING: 
+                                       return new FieldComparator.StringOrdValComparator(numHits, field, sortPos, reverse);
+                               
+                               
+                               case SortField.STRING_VAL: 
+                                       return new FieldComparator.StringValComparator(numHits, field);
+                               
+                               
+                               default: 
+                                       throw new System.SystemException("Illegal sort type: " + type);
+                               
+                       }
+               }
+               
+               /// <summary> Attempts to detect the given field type for an IndexReader.</summary>
+               /// <deprecated>
+               /// </deprecated>
+        [Obsolete]
+               internal static int DetectFieldType(IndexReader reader, System.String fieldKey)
+               {
+                       System.String field = StringHelper.Intern(fieldKey);
+                       TermEnum enumerator = reader.Terms(new Term(field));
+                       try
+                       {
+                               Term term = enumerator.Term();
+                               if (term == null)
+                               {
+                                       throw new System.SystemException("no terms in field " + field + " - cannot determine sort type");
+                               }
+                               int ret = 0;
+                               if ((System.Object) term.Field() == (System.Object) field)
+                               {
+                                       System.String termtext = term.Text().Trim();
+                    
+                    int tmpI32; long tmpI64; float tmpF;
+                    if      (System.Int32.TryParse(termtext, out tmpI32))       ret = SortField.INT;
+                    else if (System.Int64.TryParse(termtext, out tmpI64))       ret = SortField.LONG;
+                    else if (SupportClass.Single.TryParse(termtext, out tmpF))  ret = SortField.FLOAT;
+                    else ret = SortField.STRING;
+                               }
+                               else
+                               {
+                                       throw new System.SystemException("field \"" + field + "\" does not appear to be indexed");
+                               }
+                               return ret;
+                       }
+                       finally
+                       {
+                               enumerator.Close();
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SpanFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SpanFilter.cs
new file mode 100644 (file)
index 0000000..894e9ab
--- /dev/null
@@ -0,0 +1,47 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Abstract base class providing a mechanism to restrict searches to a subset
+       /// of an index and also maintains and returns position information.
+       /// This is useful if you want to compare the positions from a SpanQuery with the positions of items in
+       /// a filter.  For instance, if you had a SpanFilter that marked all the occurrences of the word "foo" in documents,
+       /// a filter, and then you entered a new SpanQuery containing "bar", you could not only filter by the word "foo", but you could
+       /// then compare position information for post-processing.
+       /// </summary>
+       [Serializable]
+       public abstract class SpanFilter:Filter
+       {
+               /// <summary>Returns a SpanFilterResult with true for documents which should be permitted in
+               /// search results and false for those that should not, along with the Spans showing where the permitted documents match.
+               /// </summary>
+               /// <param name="reader">The {@link Mono.Lucene.Net.Index.IndexReader} to load position and DocIdSet information from
+               /// </param>
+               /// <returns> A {@link SpanFilterResult}
+               /// </returns>
+               /// <throws>  System.IO.IOException if there was an issue accessing the necessary information </throws>
+               public abstract SpanFilterResult BitSpans(IndexReader reader);
+       }
+}
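+
+// A minimal subclass sketch (type and member names hypothetical), assuming the
+// DocIdSet and position data were precomputed elsewhere; SpanQueryFilter, added
+// later in this commit, is the stock implementation:
+//
+//   [Serializable]
+//   public class PrecomputedSpanFilter : SpanFilter
+//   {
+//       private readonly SpanFilterResult precomputed;
+//
+//       public PrecomputedSpanFilter(SpanFilterResult precomputed)
+//       {
+//           this.precomputed = precomputed;
+//       }
+//
+//       // Filter contract: which documents match.
+//       public override DocIdSet GetDocIdSet(IndexReader reader)
+//       {
+//           return precomputed.GetDocIdSet();
+//       }
+//
+//       // SpanFilter contract: which documents match, and where.
+//       public override SpanFilterResult BitSpans(IndexReader reader)
+//       {
+//           return precomputed;
+//       }
+//   }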
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SpanFilterResult.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SpanFilterResult.cs
new file mode 100644 (file)
index 0000000..9e814ee
--- /dev/null
@@ -0,0 +1,150 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       
+       /// <summary>  The results of a SpanQueryFilter.  Wraps the BitSet and the position information from the SpanQuery.
+       /// <p/>
+       /// NOTE: This API is still experimental and subject to change. 
+       /// </summary>
+       public class SpanFilterResult
+       {
+               /// <deprecated> 
+               /// </deprecated>
+        [Obsolete]
+               private System.Collections.BitArray bits;
+               
+               private DocIdSet docIdSet;
+               private System.Collections.IList positions; //Spans spans;
+               
+               /// <summary> </summary>
+               /// <param name="bits">The bits for the Filter
+               /// </param>
+               /// <param name="positions">A List of {@link Mono.Lucene.Net.Search.SpanFilterResult.PositionInfo} objects
+               /// </param>
+               /// <deprecated> Use {@link #SpanFilterResult(DocIdSet, List)} instead
+               /// </deprecated>
+        [Obsolete("Use SpanFilterResult(DocIdSet, List) instead")]
+               public SpanFilterResult(System.Collections.BitArray bits, System.Collections.IList positions)
+               {
+                       this.bits = bits;
+                       this.positions = positions;
+               }
+               
+               /// <summary> </summary>
+               /// <param name="docIdSet">The DocIdSet for the Filter
+               /// </param>
+               /// <param name="positions">A List of {@link Mono.Lucene.Net.Search.SpanFilterResult.PositionInfo} objects
+               /// </param>
+               public SpanFilterResult(DocIdSet docIdSet, System.Collections.IList positions)
+               {
+                       this.docIdSet = docIdSet;
+                       this.positions = positions;
+               }
+               
+               /// <summary> The first entry in the returned list corresponds to the first "on" bit.
+               /// Entries are in increasing document order.
+               /// </summary>
+               /// <returns> A List of PositionInfo objects
+               /// </returns>
+               public virtual System.Collections.IList GetPositions()
+               {
+                       return positions;
+               }
+               
+               /// <deprecated> Use {@link #GetDocIdSet()}
+               /// </deprecated>
+        [Obsolete("Use GetDocIdSet()")]
+               public virtual System.Collections.BitArray GetBits()
+               {
+                       return bits;
+               }
+               
+               /// <summary>Returns the docIdSet </summary>
+               public virtual DocIdSet GetDocIdSet()
+               {
+                       return docIdSet;
+               }
+               
+               public class PositionInfo
+               {
+                       private int doc;
+                       private System.Collections.IList positions;
+                       
+                       
+                       public PositionInfo(int doc)
+                       {
+                               this.doc = doc;
+                               positions = new System.Collections.ArrayList();
+                       }
+                       
+                       public virtual void  AddPosition(int start, int end)
+                       {
+                               positions.Add(new StartEnd(start, end));
+                       }
+                       
+                       public virtual int GetDoc()
+                       {
+                               return doc;
+                       }
+                       
+                       /// <summary> </summary>
+                       /// <returns> A List of {@link Mono.Lucene.Net.Search.SpanFilterResult.StartEnd} objects
+                       /// </returns>
+                       public virtual System.Collections.IList GetPositions()
+                       {
+                               return positions;
+                       }
+               }
+               
+               public class StartEnd
+               {
+                       private int start;
+                       private int end;
+                       
+                       
+                       public StartEnd(int start, int end)
+                       {
+                               this.start = start;
+                               this.end = end;
+                       }
+                       
+                       /// <summary> </summary>
+                       /// <returns> The end position of this match
+                       /// </returns>
+                       public virtual int GetEnd()
+                       {
+                               return end;
+                       }
+                       
+                       /// <summary> The start position.</summary>
+                       /// <returns> The start position of this match
+                       /// </returns>
+                       public virtual int GetStart()
+                       {
+                               return start;
+                       }
+               }
+       }
+}
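+
+// A consumption sketch, assuming `result` is the SpanFilterResult returned by
+// SpanFilter.BitSpans(reader):
+//
+//   foreach (SpanFilterResult.PositionInfo info in result.GetPositions())
+//   {
+//       // PositionInfo entries arrive in increasing document order.
+//       foreach (SpanFilterResult.StartEnd span in info.GetPositions())
+//       {
+//           System.Console.WriteLine("doc {0}: match at [{1}, {2})",
+//               info.GetDoc(), span.GetStart(), span.GetEnd());
+//       }
+//   }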
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SpanQueryFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/SpanQueryFilter.cs
new file mode 100644 (file)
index 0000000..df24ab7
--- /dev/null
@@ -0,0 +1,109 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using OpenBitSet = Mono.Lucene.Net.Util.OpenBitSet;
+using SpanQuery = Mono.Lucene.Net.Search.Spans.SpanQuery;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Constrains search results to only match those which also match a provided
+       /// query. Also provides position information about where each document matches,
+       /// at the cost of extra space compared with a {@link QueryWrapperFilter}: the
+       /// position information for each matching document is stored.
+       /// <p/>
+       /// This filter does not cache.  See {@link Mono.Lucene.Net.Search.CachingSpanFilter} for a wrapper that
+       /// caches.
+       /// </summary>
+       /// <version>  $Id:$
+       /// </version>
+       [Serializable]
+       public class SpanQueryFilter:SpanFilter
+       {
+               protected internal SpanQuery query;
+               
+               protected internal SpanQueryFilter()
+               {
+               }
+               
+               /// <summary>Constructs a filter which only matches documents matching
+               /// <code>query</code>.
+               /// </summary>
+               /// <param name="query">The {@link Mono.Lucene.Net.Search.Spans.SpanQuery} to use as the basis for the Filter.
+               /// </param>
+               public SpanQueryFilter(SpanQuery query)
+               {
+                       this.query = query;
+               }
+               
+               public override DocIdSet GetDocIdSet(IndexReader reader)
+               {
+                       SpanFilterResult result = BitSpans(reader);
+                       return result.GetDocIdSet();
+               }
+               
+               public override SpanFilterResult BitSpans(IndexReader reader)
+               {
+                       
+                       OpenBitSet bits = new OpenBitSet(reader.MaxDoc());
+                       Mono.Lucene.Net.Search.Spans.Spans spans = query.GetSpans(reader);
+                       System.Collections.IList tmp = new System.Collections.ArrayList(20);
+                       int currentDoc = - 1;
+                       SpanFilterResult.PositionInfo currentInfo = null;
+                       while (spans.Next())
+                       {
+                               int doc = spans.Doc();
+                               bits.Set(doc);
+                               if (currentDoc != doc)
+                               {
+                                       currentInfo = new SpanFilterResult.PositionInfo(doc);
+                                       tmp.Add(currentInfo);
+                                       currentDoc = doc;
+                               }
+                               currentInfo.AddPosition(spans.Start(), spans.End());
+                       }
+                       return new SpanFilterResult(bits, tmp);
+               }
+               
+               
+               public virtual SpanQuery GetQuery()
+               {
+                       return query;
+               }
+               
+               public override System.String ToString()
+               {
+                       return "SpanQueryFilter(" + query + ")";
+               }
+               
+               public  override bool Equals(System.Object o)
+               {
+                       return o is SpanQueryFilter && this.query.Equals(((SpanQueryFilter) o).query);
+               }
+               
+               public override int GetHashCode()
+               {
+                       return query.GetHashCode() ^ unchecked((int) 0x923F64B9);
+               }
+       }
+}
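+
+// An end-to-end usage sketch, assuming an open IndexReader `reader` over an
+// index with a "body" field (field and term are illustrative):
+//
+//   SpanQuery query = new Mono.Lucene.Net.Search.Spans.SpanTermQuery(
+//       new Mono.Lucene.Net.Index.Term("body", "foo"));
+//   SpanQueryFilter filter = new SpanQueryFilter(query);
+//   SpanFilterResult result = filter.BitSpans(reader);
+//   DocIdSet matches = result.GetDocIdSet();                     // which docs matched
+//   System.Collections.IList positions = result.GetPositions();  // where they matched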
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/FieldMaskingSpanQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/FieldMaskingSpanQuery.cs
new file mode 100644 (file)
index 0000000..1fd8acf
--- /dev/null
@@ -0,0 +1,170 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+using Query = Mono.Lucene.Net.Search.Query;
+using Searcher = Mono.Lucene.Net.Search.Searcher;
+using Similarity = Mono.Lucene.Net.Search.Similarity;
+using Weight = Mono.Lucene.Net.Search.Weight;
+
+namespace Mono.Lucene.Net.Search.Spans
+{
+       
+       /// <summary> <p/>Wrapper to allow {@link SpanQuery} objects to participate in composite 
+       /// single-field SpanQueries by 'lying' about their search field. That is, 
+       /// the masked SpanQuery will function as normal, 
+       /// but {@link SpanQuery#GetField()} simply hands back the value supplied 
+       /// in this class's constructor.<p/>
+       /// 
+       /// <p/>This can be used to support Queries like {@link SpanNearQuery} or 
+       /// {@link SpanOrQuery} across different fields, which is not ordinarily 
+       /// permitted.<p/>
+       /// 
+       /// <p/>This can be useful for denormalized relational data: for example, when 
+       /// indexing a document with conceptually many 'children': <p/>
+       /// 
+       /// <pre>
+       /// teacherid: 1
+       /// studentfirstname: james
+       /// studentsurname: jones
+       /// 
+       /// teacherid: 2
+       /// studentfirstname: james
+       /// studentsurname: smith
+       /// studentfirstname: sally
+       /// studentsurname: jones
+       /// </pre>
+       /// 
+       /// <p/>a SpanNearQuery with a slop of -1 can be applied across two 
+       /// {@link SpanTermQuery} objects as follows:
+       /// <pre>
+       /// SpanQuery q1  = new SpanTermQuery(new Term("studentfirstname", "james"));
+       /// SpanQuery q2  = new SpanTermQuery(new Term("studentsurname", "jones"));
+       /// SpanQuery q2m = new FieldMaskingSpanQuery(q2, "studentfirstname");
+       /// Query q = new SpanNearQuery(new SpanQuery[]{q1, q2m}, -1, false);
+       /// </pre>
+       /// to search for 'studentfirstname:james studentsurname:jones' and find 
+       /// teacherid 1 without matching teacherid 2 (which has a 'james' in position 0 
+       /// and 'jones' in position 1). <p/>
+       /// 
+       /// <p/>Note: as {@link #GetField()} returns the masked field, scoring will be 
+       /// done using the norms of the field name supplied. This may lead to unexpected
+       /// scoring behaviour.<p/>
+       /// </summary>
+       [Serializable]
+       public class FieldMaskingSpanQuery:SpanQuery
+       {
+               private SpanQuery maskedQuery;
+               private System.String field;
+               
+               public FieldMaskingSpanQuery(SpanQuery maskedQuery, System.String maskedField)
+               {
+                       this.maskedQuery = maskedQuery;
+                       this.field = maskedField;
+               }
+               
+               public override System.String GetField()
+               {
+                       return field;
+               }
+               
+               public virtual SpanQuery GetMaskedQuery()
+               {
+                       return maskedQuery;
+               }
+               
+               // :NOTE: getBoost and setBoost are not proxied to the maskedQuery
+               // ...this is done to be more consistent with things like SpanFirstQuery
+               
+               public override Spans GetSpans(IndexReader reader)
+               {
+                       return maskedQuery.GetSpans(reader);
+               }
+               
+               /// <deprecated> use {@link #ExtractTerms(Set)} instead. 
+               /// </deprecated>
+        [Obsolete("use ExtractTerms(Hashtable) instead.")]
+               public override System.Collections.ICollection GetTerms()
+               {
+                       return maskedQuery.GetTerms();
+               }
+               
+               public override void  ExtractTerms(System.Collections.Hashtable terms)
+               {
+                       maskedQuery.ExtractTerms(terms);
+               }
+               
+               public override Weight CreateWeight(Searcher searcher)
+               {
+                       return maskedQuery.CreateWeight(searcher);
+               }
+               
+               public override Similarity GetSimilarity(Searcher searcher)
+               {
+                       return maskedQuery.GetSimilarity(searcher);
+               }
+               
+               public override Query Rewrite(IndexReader reader)
+               {
+                       FieldMaskingSpanQuery clone = null;
+                       
+                       SpanQuery rewritten = (SpanQuery) maskedQuery.Rewrite(reader);
+                       if (rewritten != maskedQuery)
+                       {
+                               clone = (FieldMaskingSpanQuery) this.Clone();
+                               clone.maskedQuery = rewritten;
+                       }
+                       
+                       if (clone != null)
+                       {
+                               return clone;
+                       }
+                       else
+                       {
+                               return this;
+                       }
+               }
+               
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       buffer.Append("mask(");
+                       buffer.Append(maskedQuery.ToString(field));
+                       buffer.Append(")");
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       buffer.Append(" as ");
+                       buffer.Append(this.field);
+                       return buffer.ToString();
+               }
+               
+               public  override bool Equals(System.Object o)
+               {
+                       if (!(o is FieldMaskingSpanQuery))
+                               return false;
+                       FieldMaskingSpanQuery other = (FieldMaskingSpanQuery) o;
+                       return (this.GetField().Equals(other.GetField()) && (this.GetBoost() == other.GetBoost()) && this.GetMaskedQuery().Equals(other.GetMaskedQuery()));
+               }
+               
+               public override int GetHashCode()
+               {
+                       return GetMaskedQuery().GetHashCode() ^ GetField().GetHashCode() ^ System.Convert.ToInt32(GetBoost());
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/NearSpansOrdered.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/NearSpansOrdered.cs
new file mode 100644 (file)
index 0000000..7a14736
--- /dev/null
@@ -0,0 +1,434 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search.Spans
+{
+       
+       /// <summary>A Spans that is formed from the ordered subspans of a SpanNearQuery
+       /// where the subspans do not overlap and have a maximum slop between them.
+       /// <p/>
+       /// The formed spans only contain minimum slop matches.<br/>
+       /// The matching slop is computed from the distance(s) between
+       /// the non-overlapping matching Spans.<br/>
+       /// Successive matches are always formed from the successive Spans
+       /// of the SpanNearQuery.
+       /// <p/>
+       /// The formed spans may contain overlaps when the slop is at least 1.
+       /// For example, when querying using
+       /// <pre>t1 t2 t3</pre>
+       /// with slop at least 1, the fragment:
+       /// <pre>t1 t2 t1 t3 t2 t3</pre>
+       /// matches twice:
+       /// <pre>t1 t2 .. t3      </pre>
+       /// <pre>      t1 .. t2 t3</pre>
+       /// 
+       /// 
+       /// Expert:
+       /// Only public for subclassing.  Most implementations should not need this class.
+       /// </summary>
+       public class NearSpansOrdered:Spans
+       {
+               internal class AnonymousClassComparator : System.Collections.IComparer
+               {
+                       public AnonymousClassComparator(NearSpansOrdered enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(NearSpansOrdered enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private NearSpansOrdered enclosingInstance;
+                       public NearSpansOrdered Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       public virtual int Compare(System.Object o1, System.Object o2)
+                       {
+                               return ((Spans) o1).Doc() - ((Spans) o2).Doc();
+                       }
+               }
+               private void  InitBlock()
+               {
+                       spanDocComparator = new AnonymousClassComparator(this);
+               }
+               private int allowedSlop;
+               private bool firstTime = true;
+               private bool more = false;
+               
+               /// <summary>The spans in the same order as the SpanNearQuery </summary>
+               private Spans[] subSpans;
+               
+               /// <summary>Indicates that all subSpans have same doc() </summary>
+               private bool inSameDoc = false;
+               
+               private int matchDoc = - 1;
+               private int matchStart = - 1;
+               private int matchEnd = - 1;
+               private System.Collections.Generic.List<byte[]> matchPayload;
+               
+               private Spans[] subSpansByDoc;
+               private System.Collections.IComparer spanDocComparator;
+               
+               private SpanNearQuery query;
+               private bool collectPayloads = true;
+               
+               public NearSpansOrdered(SpanNearQuery spanNearQuery, IndexReader reader):this(spanNearQuery, reader, true)
+               {
+               }
+               
+               public NearSpansOrdered(SpanNearQuery spanNearQuery, IndexReader reader, bool collectPayloads)
+               {
+                       InitBlock();
+                       if (spanNearQuery.GetClauses().Length < 2)
+                       {
+                               throw new System.ArgumentException("Less than 2 clauses: " + spanNearQuery);
+                       }
+                       this.collectPayloads = collectPayloads;
+                       allowedSlop = spanNearQuery.GetSlop();
+                       SpanQuery[] clauses = spanNearQuery.GetClauses();
+                       subSpans = new Spans[clauses.Length];
+                       matchPayload = new System.Collections.Generic.List<byte[]>();
+                       subSpansByDoc = new Spans[clauses.Length];
+                       for (int i = 0; i < clauses.Length; i++)
+                       {
+                               subSpans[i] = clauses[i].GetSpans(reader);
+                               subSpansByDoc[i] = subSpans[i]; // used in toSameDoc()
+                       }
+                       query = spanNearQuery; // kept for toString() only.
+               }
+               
+               // inherit javadocs
+               public override int Doc()
+               {
+                       return matchDoc;
+               }
+               
+               // inherit javadocs
+               public override int Start()
+               {
+                       return matchStart;
+               }
+               
+               // inherit javadocs
+               public override int End()
+               {
+                       return matchEnd;
+               }
+               
+               public virtual Spans[] GetSubSpans()
+               {
+                       return subSpans;
+               }
+               
+               // TODO: Remove warning after API has been finalized
+               // TODO: Would be nice to be able to lazy load payloads
+               public override System.Collections.Generic.ICollection<byte[]> GetPayload()
+               {
+                       return matchPayload;
+               }
+               
+               // TODO: Remove warning after API has been finalized
+               public override bool IsPayloadAvailable()
+               {
+                       return matchPayload.Count > 0;
+               }
+               
+               // inherit javadocs
+               public override bool Next()
+               {
+                       if (firstTime)
+                       {
+                               firstTime = false;
+                               for (int i = 0; i < subSpans.Length; i++)
+                               {
+                                       if (!subSpans[i].Next())
+                                       {
+                                               more = false;
+                                               return false;
+                                       }
+                               }
+                               more = true;
+                       }
+                       if (collectPayloads)
+                       {
+                               matchPayload.Clear();
+                       }
+                       return AdvanceAfterOrdered();
+               }
+               
+               // inherit javadocs
+               public override bool SkipTo(int target)
+               {
+                       if (firstTime)
+                       {
+                               firstTime = false;
+                               for (int i = 0; i < subSpans.Length; i++)
+                               {
+                                       if (!subSpans[i].SkipTo(target))
+                                       {
+                                               more = false;
+                                               return false;
+                                       }
+                               }
+                               more = true;
+                       }
+                       else if (more && (subSpans[0].Doc() < target))
+                       {
+                               if (subSpans[0].SkipTo(target))
+                               {
+                                       inSameDoc = false;
+                               }
+                               else
+                               {
+                                       more = false;
+                                       return false;
+                               }
+                       }
+                       if (collectPayloads)
+                       {
+                               matchPayload.Clear();
+                       }
+                       return AdvanceAfterOrdered();
+               }
+               
+               /// <summary>Advances the subSpans to just after an ordered match with a minimum slop
+               /// that is smaller than the slop allowed by the SpanNearQuery.
+               /// </summary>
+               /// <returns> true iff there is such a match.
+               /// </returns>
+               private bool AdvanceAfterOrdered()
+               {
+                       while (more && (inSameDoc || ToSameDoc()))
+                       {
+                               if (StretchToOrder() && ShrinkToAfterShortestMatch())
+                               {
+                                       return true;
+                               }
+                       }
+                       return false; // no more matches
+               }
+               
+               
+               /// <summary>Advance the subSpans to the same document </summary>
+               private bool ToSameDoc()
+               {
+                       System.Array.Sort(subSpansByDoc, spanDocComparator);
+                       int firstIndex = 0;
+                       int maxDoc = subSpansByDoc[subSpansByDoc.Length - 1].Doc();
+                       while (subSpansByDoc[firstIndex].Doc() != maxDoc)
+                       {
+                               if (!subSpansByDoc[firstIndex].SkipTo(maxDoc))
+                               {
+                                       more = false;
+                                       inSameDoc = false;
+                                       return false;
+                               }
+                               maxDoc = subSpansByDoc[firstIndex].Doc();
+                               if (++firstIndex == subSpansByDoc.Length)
+                               {
+                                       firstIndex = 0;
+                               }
+                       }
+                       for (int i = 0; i < subSpansByDoc.Length; i++)
+                       {
+                               System.Diagnostics.Debug.Assert((subSpansByDoc [i].Doc() == maxDoc)
+                                       , "NearSpansOrdered.toSameDoc() spans " + subSpansByDoc [0] 
+                                       + "\n at doc " + subSpansByDoc [i].Doc() 
+                                       + ", but should be at " + maxDoc);
+                       }
+                       inSameDoc = true;
+                       return true;
+               }
+               
+               /// <summary>Check whether two Spans in the same document are ordered.</summary>
+               /// <param name="spans1">
+               /// </param>
+               /// <param name="spans2">
+               /// </param>
+               /// <returns> true iff spans1 starts before spans2
+               /// or the spans start at the same position,
+               /// and spans1 ends before spans2.
+               /// </returns>
+               internal static bool DocSpansOrdered(Spans spans1, Spans spans2)
+               {
+                       System.Diagnostics.Debug.Assert(spans1.Doc() == spans2.Doc(), "doc1 " + spans1.Doc() + " != doc2 " + spans2.Doc());
+                       int start1 = spans1.Start();
+                       int start2 = spans2.Start();
+                       /* Do not call docSpansOrdered(int,int,int,int) to avoid invoking .end() : */
+                       return (start1 == start2)?(spans1.End() < spans2.End()):(start1 < start2);
+               }
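+               // Example of the rule above: spans1 = [3, 5) and spans2 = [3, 7) in the
+               // same document tie on start, so ordering falls to the ends: 5 < 7,
+               // hence spans1 is ordered before spans2.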
+               
+               /// <summary>Like {@link #DocSpansOrdered(Spans,Spans)}, but use the spans
+               /// starts and ends as parameters.
+               /// </summary>
+               private static bool DocSpansOrdered(int start1, int end1, int start2, int end2)
+               {
+                       return (start1 == start2)?(end1 < end2):(start1 < start2);
+               }
+               
+               /// <summary>Order the subSpans within the same document by advancing all later spans
+               /// after the previous one.
+               /// </summary>
+               private bool StretchToOrder()
+               {
+                       matchDoc = subSpans[0].Doc();
+                       for (int i = 1; inSameDoc && (i < subSpans.Length); i++)
+                       {
+                               while (!DocSpansOrdered(subSpans[i - 1], subSpans[i]))
+                               {
+                                       if (!subSpans[i].Next())
+                                       {
+                                               inSameDoc = false;
+                                               more = false;
+                                               break;
+                                       }
+                                       else if (matchDoc != subSpans[i].Doc())
+                                       {
+                                               inSameDoc = false;
+                                               break;
+                                       }
+                               }
+                       }
+                       return inSameDoc;
+               }
+               
+               /// <summary>The subSpans are ordered in the same doc, so there is a possible match.
+               /// Compute the slop while making the match as short as possible by advancing
+               /// all subSpans except the last one in reverse order.
+               /// </summary>
+               private bool ShrinkToAfterShortestMatch()
+               {
+                       matchStart = subSpans[subSpans.Length - 1].Start();
+                       matchEnd = subSpans[subSpans.Length - 1].End();
+            System.Collections.Generic.Dictionary<byte[], byte[]> possibleMatchPayloads = new System.Collections.Generic.Dictionary<byte[], byte[]>();
+                       if (subSpans[subSpans.Length - 1].IsPayloadAvailable())
+                       {
+                System.Collections.Generic.ICollection<byte[]> payload = subSpans[subSpans.Length - 1].GetPayload();
+                foreach(byte[] pl in payload)
+                {
+                    if (!possibleMatchPayloads.ContainsKey(pl))
+                    {
+                        possibleMatchPayloads.Add(pl, pl);
+                    }
+                }
+                       }
+                       
+                       System.Collections.Generic.List<byte[]> possiblePayload = null;
+                       
+                       int matchSlop = 0;
+                       int lastStart = matchStart;
+                       int lastEnd = matchEnd;
+                       for (int i = subSpans.Length - 2; i >= 0; i--)
+                       {
+                               Spans prevSpans = subSpans[i];
+                               if (collectPayloads && prevSpans.IsPayloadAvailable())
+                               {
+                                       System.Collections.Generic.ICollection<byte[]> payload = prevSpans.GetPayload();
+                                       possiblePayload = new System.Collections.Generic.List<byte[]>(payload.Count);
+                                       possiblePayload.AddRange(payload);
+                               }
+                               
+                               int prevStart = prevSpans.Start();
+                               int prevEnd = prevSpans.End();
+                               while (true)
+                               {
+                                       // Advance prevSpans until after (lastStart, lastEnd)
+                                       if (!prevSpans.Next())
+                                       {
+                                               inSameDoc = false;
+                                               more = false;
+                                               break; // Check remaining subSpans for final match.
+                                       }
+                                       else if (matchDoc != prevSpans.Doc())
+                                       {
+                                               inSameDoc = false; // The last subSpans is not advanced here.
+                                               break; // Check remaining subSpans for last match in this document.
+                                       }
+                                       else
+                                       {
+                                               int ppStart = prevSpans.Start();
+                                               int ppEnd = prevSpans.End(); // Cannot avoid invoking .end()
+                                               if (!DocSpansOrdered(ppStart, ppEnd, lastStart, lastEnd))
+                                               {
+                                                       break; // Check remaining subSpans.
+                                               }
+                                               else
+                                               {
+                                                       // prevSpans still before (lastStart, lastEnd)
+                                                       prevStart = ppStart;
+                                                       prevEnd = ppEnd;
+                                                       if (collectPayloads && prevSpans.IsPayloadAvailable())
+                                                       {
+                                                               System.Collections.Generic.ICollection<byte[]> payload = prevSpans.GetPayload();
+                                                               possiblePayload = new System.Collections.Generic.List<byte[]>(payload.Count);
+                                                               possiblePayload.AddRange(payload);
+                                                       }
+                                               }
+                                       }
+                               }
+                               
+                               if (collectPayloads && possiblePayload != null)
+                               {
+                    foreach (byte[] pl in possiblePayload)
+                    {
+                        if (!possibleMatchPayloads.ContainsKey(pl))
+                        {
+                            possibleMatchPayloads.Add(pl, pl);
+                        }
+                    }
+                               }
+                               
+                               System.Diagnostics.Debug.Assert(prevStart <= matchStart);
+                               if (matchStart > prevEnd)
+                               {
+                                       // Only non overlapping spans add to slop.
+                                       matchSlop += (matchStart - prevEnd);
+                               }
+                               
+                               /* Do not break on (matchSlop > allowedSlop) here to make sure
+                               * that subSpans[0] is advanced after the match, if any.
+                               */
+                               matchStart = prevStart;
+                               lastStart = prevStart;
+                               lastEnd = prevEnd;
+                       }
+                       
+                       bool match = matchSlop <= allowedSlop;
+                       
+                       if (collectPayloads && match && possibleMatchPayloads.Count > 0)
+                       {
+                               matchPayload.AddRange(possibleMatchPayloads.Keys);
+                       }
+                       
+                       return match; // ordered and allowed slop
+               }
+               
+               public override System.String ToString()
+               {
+                       return GetType().FullName + "(" + query.ToString() + ")@" + (firstTime?"START":(more?(Doc() + ":" + Start() + "-" + End()):"END"));
+               }
+       }
+}
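+
+// Slop arithmetic for the class-level example above, as ShrinkToAfterShortestMatch
+// computes it (token positions assumed 0-based): in the fragment
+// "t1 t2 t1 t3 t2 t3", the first match takes t1 at 0, t2 at 1, t3 at 3; the only
+// non-overlapping gap is between t2 (end 2) and t3 (start 3), so
+// matchSlop = 3 - 2 = 1, which matches when allowedSlop >= 1.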
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/NearSpansUnordered.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/NearSpansUnordered.cs
new file mode 100644 (file)
index 0000000..6765d3e
--- /dev/null
@@ -0,0 +1,421 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using PriorityQueue = Mono.Lucene.Net.Util.PriorityQueue;
+
+namespace Mono.Lucene.Net.Search.Spans
+{
+       
+       /// <summary> Similar to {@link NearSpansOrdered}, but for the unordered case.
+       /// 
+       /// Expert:
+       /// Only public for subclassing.  Most implementations should not need this class.
+       /// </summary>
+       public class NearSpansUnordered:Spans
+       {
+               private SpanNearQuery query;
+               
+               private System.Collections.IList ordered = new System.Collections.ArrayList(); // spans in query order
+               private Spans[] subSpans;
+               private int slop; // from query
+               
+               private SpansCell first; // linked list of spans
+               private SpansCell last; // sorted by doc only
+               
+               private int totalLength; // sum of current lengths
+               
+               private CellQueue queue; // sorted queue of spans
+               private SpansCell max; // max element in queue
+               
+               private bool more = true; // true iff not done
+               private bool firstTime = true; // true before first next()
+               
+               private class CellQueue:PriorityQueue
+               {
+                       private void  InitBlock(NearSpansUnordered enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private NearSpansUnordered enclosingInstance;
+                       public NearSpansUnordered Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       public CellQueue(NearSpansUnordered enclosingInstance, int size)
+                       {
+                               InitBlock(enclosingInstance);
+                               Initialize(size);
+                       }
+                       
+                       public override bool LessThan(System.Object o1, System.Object o2)
+                       {
+                               SpansCell spans1 = (SpansCell) o1;
+                               SpansCell spans2 = (SpansCell) o2;
+                               if (spans1.Doc() == spans2.Doc())
+                               {
+                                       return NearSpansOrdered.DocSpansOrdered(spans1, spans2);
+                               }
+                               else
+                               {
+                                       return spans1.Doc() < spans2.Doc();
+                               }
+                       }
+               }
+               
+               
+               /// <summary>Wraps a Spans, and can be used to form a linked list. </summary>
+               private class SpansCell:Spans
+               {
+                       private void  InitBlock(NearSpansUnordered enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private NearSpansUnordered enclosingInstance;
+                       public NearSpansUnordered Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal /*private*/ Spans spans;
+                       internal /*private*/ SpansCell next;
+                       private int length = - 1;
+                       private int index;
+                       
+                       public SpansCell(NearSpansUnordered enclosingInstance, Spans spans, int index)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.spans = spans;
+                               this.index = index;
+                       }
+                       
+                       public override bool Next()
+                       {
+                               return Adjust(spans.Next());
+                       }
+                       
+                       public override bool SkipTo(int target)
+                       {
+                               return Adjust(spans.SkipTo(target));
+                       }
+                       
+                       private bool Adjust(bool condition)
+                       {
+                               if (length != - 1)
+                               {
+                                       Enclosing_Instance.totalLength -= length; // subtract old length
+                               }
+                               if (condition)
+                               {
+                                       length = End() - Start();
+                                       Enclosing_Instance.totalLength += length; // add new length
+                                       
+                                       if (Enclosing_Instance.max == null || Doc() > Enclosing_Instance.max.Doc() || (Doc() == Enclosing_Instance.max.Doc()) && (End() > Enclosing_Instance.max.End()))
+                                       {
+                                               Enclosing_Instance.max = this;
+                                       }
+                               }
+                               Enclosing_Instance.more = condition;
+                               return condition;
+                       }
+                       
+                       public override int Doc()
+                       {
+                               return spans.Doc();
+                       }
+                       public override int Start()
+                       {
+                               return spans.Start();
+                       }
+                       public override int End()
+                       {
+                               return spans.End();
+                       }
+                       // TODO: Remove warning after API has been finalized
+                       public override System.Collections.Generic.ICollection<byte[]> GetPayload()
+                       {
+                               return spans.GetPayload();
+                       }
+                       
+                       // TODO: Remove warning after API has been finalized
+                       public override bool IsPayloadAvailable()
+                       {
+                               return spans.IsPayloadAvailable();
+                       }
+                       
+                       public override System.String ToString()
+                       {
+                               return spans.ToString() + "#" + index;
+                       }
+               }
+               
+               
+               public NearSpansUnordered(SpanNearQuery query, IndexReader reader)
+               {
+                       this.query = query;
+                       this.slop = query.GetSlop();
+                       
+                       SpanQuery[] clauses = query.GetClauses();
+                       queue = new CellQueue(this, clauses.Length);
+                       subSpans = new Spans[clauses.Length];
+                       for (int i = 0; i < clauses.Length; i++)
+                       {
+                               SpansCell cell = new SpansCell(this, clauses[i].GetSpans(reader), i);
+                               ordered.Add(cell);
+                               subSpans[i] = cell.spans;
+                       }
+               }
+               public virtual Spans[] GetSubSpans()
+               {
+                       return subSpans;
+               }
+               public override bool Next()
+               {
+                       if (firstTime)
+                       {
+                               InitList(true);
+                               ListToQueue(); // initialize queue
+                               firstTime = false;
+                       }
+                       else if (more)
+                       {
+                               if (Min().Next())
+                               {
+                                       // trigger further scanning
+                                       queue.AdjustTop(); // maintain queue
+                               }
+                               else
+                               {
+                                       more = false;
+                               }
+                       }
+                       
+                       while (more)
+                       {
+                               
+                               bool queueStale = false;
+                               
+                               if (Min().Doc() != max.Doc())
+                               {
+                                       // maintain list
+                                       QueueToList();
+                                       queueStale = true;
+                               }
+                               
+                               // skip to doc w/ all clauses
+                               
+                               while (more && first.Doc() < last.Doc())
+                               {
+                                       more = first.SkipTo(last.Doc()); // skip first upto last
+                                       FirstToLast(); // and move it to the end
+                                       queueStale = true;
+                               }
+                               
+                               if (!more)
+                                       return false;
+                               
+                               // found doc w/ all clauses
+                               
+                               if (queueStale)
+                               {
+                                       // maintain the queue
+                                       ListToQueue();
+                                       queueStale = false;
+                               }
+                               
+                               if (AtMatch())
+                               {
+                                       return true;
+                               }
+                               
+                               more = Min().Next();
+                               if (more)
+                               {
+                                       queue.AdjustTop(); // maintain queue
+                               }
+                       }
+                       return false; // no more matches
+               }
+               
+               public override bool SkipTo(int target)
+               {
+                       if (firstTime)
+                       {
+                               // initialize
+                               InitList(false);
+                               for (SpansCell cell = first; more && cell != null; cell = cell.next)
+                               {
+                                       more = cell.SkipTo(target); // skip all
+                               }
+                               if (more)
+                               {
+                                       ListToQueue();
+                               }
+                               firstTime = false;
+                       }
+                       else
+                       {
+                               // normal case
+                               while (more && Min().Doc() < target)
+                               {
+                                       // skip as needed
+                                       if (Min().SkipTo(target))
+                                       {
+                                               queue.AdjustTop();
+                                       }
+                                       else
+                                       {
+                                               more = false;
+                                       }
+                               }
+                       }
+                       return more && (AtMatch() || Next());
+               }
+               
+               private SpansCell Min()
+               {
+                       return (SpansCell) queue.Top();
+               }
+               
+               public override int Doc()
+               {
+                       return Min().Doc();
+               }
+               public override int Start()
+               {
+                       return Min().Start();
+               }
+               public override int End()
+               {
+                       return max.End();
+               }
+               
+               // TODO: Remove warning after API has been finalized
+               /// <summary> WARNING: The List is not necessarily in order of the positions</summary>
+               /// <returns> Collection of <code>byte[]</code> payloads
+               /// </returns>
+               /// <throws>  IOException </throws>
+               public override System.Collections.Generic.ICollection<byte[]> GetPayload()
+               {
+            //mgarski: faking out another HashSet<T>...
+                       System.Collections.Generic.Dictionary<byte[], byte[]> matchPayload = new System.Collections.Generic.Dictionary<byte[], byte[]>(); 
+                       for (SpansCell cell = first; cell != null; cell = cell.next)
+                       {
+                               if (cell.IsPayloadAvailable())
+                               {
+                    System.Collections.Generic.ICollection<byte[]> cellPayload = cell.GetPayload();
+                    foreach (byte[] val in cellPayload)
+                    {
+                        if (!matchPayload.ContainsKey(val))
+                        {
+                            matchPayload.Add(val, val);
+                        }
+                    }
+                               }
+                       }
+                       return matchPayload.Keys;
+               }
+               
+               // TODO: Remove warning after API has been finalized
+               public override bool IsPayloadAvailable()
+               {
+                       SpansCell pointer = Min();
+                       while (pointer != null)
+                       {
+                               if (pointer.IsPayloadAvailable())
+                               {
+                                       return true;
+                               }
+                               pointer = pointer.next;
+                       }
+                       
+                       return false;
+               }
+               
+               public override System.String ToString()
+               {
+                       return GetType().FullName + "(" + query.ToString() + ")@" + (firstTime?"START":(more?(Doc() + ":" + Start() + "-" + End()):"END"));
+               }
+               
+               private void  InitList(bool next)
+               {
+                       for (int i = 0; more && i < ordered.Count; i++)
+                       {
+                               SpansCell cell = (SpansCell) ordered[i];
+                               if (next)
+                                       more = cell.Next(); // move to first entry
+                               if (more)
+                               {
+                                       AddToList(cell); // add to list
+                               }
+                       }
+               }
+               
+               private void  AddToList(SpansCell cell)
+               {
+                       if (last != null)
+                       {
+                               // add next to end of list
+                               last.next = cell;
+                       }
+                       else
+                               first = cell;
+                       last = cell;
+                       cell.next = null;
+               }
+               
+               private void  FirstToLast()
+               {
+                       last.next = first; // move first to end of list
+                       last = first;
+                       first = first.next;
+                       last.next = null;
+               }
+               
+               private void  QueueToList()
+               {
+                       last = first = null;
+                       while (queue.Top() != null)
+                       {
+                               AddToList((SpansCell) queue.Pop());
+                       }
+               }
+               
+               private void  ListToQueue()
+               {
+                       queue.Clear(); // rebuild queue
+                       for (SpansCell cell = first; cell != null; cell = cell.next)
+                       {
+                               queue.Put(cell); // add to queue from list
+                       }
+               }
+               
+               private bool AtMatch()
+               {
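+                       // match iff all clauses are on the same document and the enclosing
+                       // span, net of the clause spans' own lengths, is within slop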
+                       return (Min().Doc() == max.Doc()) && ((max.End() - Min().Start() - totalLength) <= slop);
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/Package.html b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/Package.html
new file mode 100644 (file)
index 0000000..43cb1ab
--- /dev/null
@@ -0,0 +1,92 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">\r
+<!--\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements.  See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License.  You may obtain a copy of the License at\r
+\r
+     http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+-->\r
+<html>\r
+<head></head>\r
+<body>\r
+The calculus of spans.\r
+\r
+<p>A span is a <code>&lt;doc,startPosition,endPosition&gt;</code> tuple.</p>\r
+\r
+<p>The following span query operators are implemented:\r
+\r
+<ul>\r
+\r
+<li>A <a href = "SpanTermQuery.html">SpanTermQuery</a> matches all spans\r
+containing a particular <a href = "index/Term.html">Term</a>.</li>\r
+\r
+<li> A <a href = "SpanNearQuery.html">SpanNearQuery</a> matches spans\r
+which occur near one another, and can be used to implement things like\r
+phrase search (when constructed from <a\r
+href="SpanTermQuery.html">SpanTermQueries</a>) and inter-phrase\r
+proximity (when constructed from other <a\r
+href="SpanNearQuery.html">SpanNearQueries</a>).</li>\r
+\r
+<li>A <a href = "SpanOrQuery.html">SpanOrQuery</a> merges spans from a\r
+number of other <a href = "SpanQuery.html">SpanQueries</a>.</li>\r
+\r
+<li>A <a href = "SpanNotQuery.html">SpanNotQuery</a> removes spans\r
+matching one <a href = "SpanQuery.html">SpanQuery</a> which overlap\r
+another.  This can be used, e.g., to implement within-paragraph\r
+search.</li>\r
+\r
+<li>A <a href = "SpanFirstQuery.html">SpanFirstQuery</a> matches spans\r
+matching <code>q</code> whose end position is less than or equal to\r
+<code>n</code>.  This can be used to constrain matches to the first\r
+part of the document.</li>\r
+\r
+</ul>\r
+\r
+In all cases, output spans are minimally inclusive.  In other words, a\r
+span formed by matching a span in x and y starts at the lesser of the\r
+two starts and ends at the greater of the two ends.\r
+</p>\r
+\r
+<p>For example, a span query which matches "John Kerry" within ten\r
+words of "George Bush" within the first 100 words of the document\r
+could be constructed with:\r
+<pre>\r
+SpanQuery john   = new SpanTermQuery(new Term("content", "john"));\r
+SpanQuery kerry  = new SpanTermQuery(new Term("content", "kerry"));\r
+SpanQuery george = new SpanTermQuery(new Term("content", "george"));\r
+SpanQuery bush   = new SpanTermQuery(new Term("content", "bush"));\r
+\r
+SpanQuery johnKerry =\r
+   new SpanNearQuery(new SpanQuery[] {john, kerry}, 0, true);\r
+\r
+SpanQuery georgeBush =\r
+   new SpanNearQuery(new SpanQuery[] {george, bush}, 0, true);\r
+\r
+SpanQuery johnKerryNearGeorgeBush =\r
+   new SpanNearQuery(new SpanQuery[] {johnKerry, georgeBush}, 10, false);\r
+\r
+SpanQuery johnKerryNearGeorgeBushAtStart =\r
+   new SpanFirstQuery(johnKerryNearGeorgeBush, 100);\r
+</pre>\r
+\r
+<p>Span queries may be freely intermixed with other Lucene queries.\r
+So, for example, the above query can be restricted to documents which\r
+also use the word "iraq" with:\r
+\r
+<pre>\r
+Query query = new BooleanQuery();\r
+query.add(johnKerryNearGeorgeBushAtStart, true, false);\r
+query.add(new TermQuery(new Term("content", "iraq")), true, false);\r
+</pre>\r
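+\r
+<p>In this C# port, the same query would presumably be built with the\r
+port's capitalized method names (a sketch, assuming the\r
+<code>BooleanClause.Occur</code> overload of <code>BooleanQuery.Add</code>):\r
+\r
+<pre>\r
+BooleanQuery query = new BooleanQuery();\r
+query.Add(johnKerryNearGeorgeBushAtStart, BooleanClause.Occur.MUST);\r
+query.Add(new TermQuery(new Term("content", "iraq")), BooleanClause.Occur.MUST);\r
+</pre>\r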
+\r
+</body>\r
+</html>\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanFirstQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanFirstQuery.cs
new file mode 100644 (file)
index 0000000..cb1050d
--- /dev/null
@@ -0,0 +1,218 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+using Query = Mono.Lucene.Net.Search.Query;
+
+namespace Mono.Lucene.Net.Search.Spans
+{
+       
+       /// <summary>Matches spans near the beginning of a field. </summary>
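+       // A minimal usage sketch (illustrative only; the field and term are hypothetical):
+       //   SpanQuery term  = new SpanTermQuery(new Term("content", "introduction"));
+       //   SpanQuery first = new SpanFirstQuery(term, 50); // spans ending at position 50 or earlier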
+       [Serializable]
+       public class SpanFirstQuery:SpanQuery, System.ICloneable
+       {
+               private class AnonymousClassSpans : Spans
+               {
+                       public AnonymousClassSpans(Mono.Lucene.Net.Index.IndexReader reader, SpanFirstQuery enclosingInstance)
+                       {
+                               InitBlock(reader, enclosingInstance);
+                       }
+                       private void  InitBlock(Mono.Lucene.Net.Index.IndexReader reader, SpanFirstQuery enclosingInstance)
+                       {
+                               this.reader = reader;
+                               this.enclosingInstance = enclosingInstance;
+                               spans = Enclosing_Instance.match.GetSpans(reader);
+                       }
+                       private Mono.Lucene.Net.Index.IndexReader reader;
+                       private SpanFirstQuery enclosingInstance;
+                       public SpanFirstQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private Spans spans;
+                       
+                       public override bool Next()
+                       {
+                               while (spans.Next())
+                               {
+                                       // scan to next match
+                                       if (End() <= Enclosing_Instance.end)
+                                               return true;
+                               }
+                               return false;
+                       }
+                       
+                       public override bool SkipTo(int target)
+                       {
+                               if (!spans.SkipTo(target))
+                                       return false;
+                               
+                               return spans.End() <= Enclosing_Instance.end || Next();
+                       }
+                       
+                       public override int Doc()
+                       {
+                               return spans.Doc();
+                       }
+                       public override int Start()
+                       {
+                               return spans.Start();
+                       }
+                       public override int End()
+                       {
+                               return spans.End();
+                       }
+                       
+                       // TODO: Remove warning after API has been finalized
+                       public override System.Collections.Generic.ICollection<byte[]> GetPayload()
+                       {
+                               System.Collections.Generic.ICollection<byte[]> result = null;
+                               if (spans.IsPayloadAvailable())
+                               {
+                                       result = spans.GetPayload();
+                               }
+                               return result; //TODO: any way to avoid the new construction?
+                       }
+                       
+                       // TODO: Remove warning after API has been finalized
+                       public override bool IsPayloadAvailable()
+                       {
+                               return spans.IsPayloadAvailable();
+                       }
+                       
+                       public override System.String ToString()
+                       {
+                               return "spans(" + Enclosing_Instance.ToString() + ")";
+                       }
+               }
+               private SpanQuery match;
+               private int end;
+               
+               /// <summary>Construct a SpanFirstQuery matching spans in <code>match</code> whose end
+               /// position is less than or equal to <code>end</code>. 
+               /// </summary>
+               public SpanFirstQuery(SpanQuery match, int end)
+               {
+                       this.match = match;
+                       this.end = end;
+               }
+               
+               /// <summary>Return the SpanQuery whose matches are filtered. </summary>
+               public virtual SpanQuery GetMatch()
+               {
+                       return match;
+               }
+               
+               /// <summary>Return the maximum end position permitted in a match. </summary>
+               public virtual int GetEnd()
+               {
+                       return end;
+               }
+               
+               public override System.String GetField()
+               {
+                       return match.GetField();
+               }
+               
+               /// <summary>Returns a collection of all terms matched by this query.</summary>
+               /// <deprecated> use extractTerms instead
+               /// </deprecated>
+               /// <seealso cref="ExtractTerms(System.Collections.Hashtable)">
+               /// </seealso>
+        [Obsolete("use ExtractTerms instead")]
+               public override System.Collections.ICollection GetTerms()
+               {
+                       return match.GetTerms();
+               }
+               
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       buffer.Append("spanFirst(");
+                       buffer.Append(match.ToString(field));
+                       buffer.Append(", ");
+                       buffer.Append(end);
+                       buffer.Append(")");
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       return buffer.ToString();
+               }
+               
+               public override System.Object Clone()
+               {
+                       SpanFirstQuery spanFirstQuery = new SpanFirstQuery((SpanQuery) match.Clone(), end);
+                       spanFirstQuery.SetBoost(GetBoost());
+                       return spanFirstQuery;
+               }
+               
+               public override void  ExtractTerms(System.Collections.Hashtable terms)
+               {
+                       match.ExtractTerms(terms);
+               }
+               
+               public override Spans GetSpans(IndexReader reader)
+               {
+                       return new AnonymousClassSpans(reader, this);
+               }
+               
+               public override Query Rewrite(IndexReader reader)
+               {
+                       SpanFirstQuery clone = null;
+                       
+                       SpanQuery rewritten = (SpanQuery) match.Rewrite(reader);
+                       if (rewritten != match)
+                       {
+                               clone = (SpanFirstQuery) this.Clone();
+                               clone.match = rewritten;
+                       }
+                       
+                       if (clone != null)
+                       {
+                               return clone; // some clauses rewrote
+                       }
+                       else
+                       {
+                               return this; // no clauses rewrote
+                       }
+               }
+               
+               public  override bool Equals(System.Object o)
+               {
+                       if (this == o)
+                               return true;
+                       if (!(o is SpanFirstQuery))
+                               return false;
+                       
+                       SpanFirstQuery other = (SpanFirstQuery) o;
+                       return this.end == other.end && this.match.Equals(other.match) && this.GetBoost() == other.GetBoost();
+               }
+               
+               public override int GetHashCode()
+               {
+                       int h = match.GetHashCode();
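+                       // SupportClass.Number.URShift emulates Java's unsigned right shift (>>>)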
+                       h ^= ((h << 8) | (SupportClass.Number.URShift(h, 25))); // reversible
+                       h ^= System.Convert.ToInt32(GetBoost()) ^ end;
+                       return h;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanNearQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanNearQuery.cs
new file mode 100644 (file)
index 0000000..e829eab
--- /dev/null
@@ -0,0 +1,246 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+using Query = Mono.Lucene.Net.Search.Query;
+
+namespace Mono.Lucene.Net.Search.Spans
+{
+       
+       /// <summary>Matches spans which are near one another.  One can specify <i>slop</i>, the
+       /// maximum number of intervening unmatched positions, as well as whether
+       /// matches are required to be in-order. 
+       /// </summary>
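+       // A minimal usage sketch (illustrative only; the field and terms are hypothetical):
+       //   SpanQuery quick = new SpanTermQuery(new Term("content", "quick"));
+       //   SpanQuery fox   = new SpanTermQuery(new Term("content", "fox"));
+       //   // "quick" followed by "fox" with at most 2 unmatched positions in between
+       //   SpanQuery near  = new SpanNearQuery(new SpanQuery[] { quick, fox }, 2, true);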
+       [Serializable]
+       public class SpanNearQuery:SpanQuery, System.ICloneable
+       {
+               protected internal System.Collections.ArrayList clauses;
+               protected internal int slop;
+               protected internal bool inOrder;
+               
+               protected internal System.String field;
+               private bool collectPayloads;
+               
+               /// <summary>Construct a SpanNearQuery.  Matches spans matching a span from each
+               /// clause, with up to <code>slop</code> total unmatched positions between
+               /// them.  When <code>inOrder</code> is true, the spans from each clause
+               /// must be ordered as in <code>clauses</code>. 
+               /// </summary>
+               public SpanNearQuery(SpanQuery[] clauses, int slop, bool inOrder):this(clauses, slop, inOrder, true)
+               {
+               }
+               
+               public SpanNearQuery(SpanQuery[] clauses, int slop, bool inOrder, bool collectPayloads)
+               {
+                       
+                       // copy clauses array into an ArrayList
+                       this.clauses = new System.Collections.ArrayList(clauses.Length);
+                       for (int i = 0; i < clauses.Length; i++)
+                       {
+                               SpanQuery clause = clauses[i];
+                               if (i == 0)
+                               {
+                                       // check field
+                                       field = clause.GetField();
+                               }
+                               else if (!clause.GetField().Equals(field))
+                               {
+                                       throw new System.ArgumentException("Clauses must have same field.");
+                               }
+                               this.clauses.Add(clause);
+                       }
+                       this.collectPayloads = collectPayloads;
+                       this.slop = slop;
+                       this.inOrder = inOrder;
+               }
+               
+               /// <summary>Return the clauses whose spans are matched. </summary>
+               public virtual SpanQuery[] GetClauses()
+               {
+                       return (SpanQuery[]) clauses.ToArray(typeof(SpanQuery));
+               }
+               
+               /// <summary>Return the maximum number of intervening unmatched positions permitted.</summary>
+               public virtual int GetSlop()
+               {
+                       return slop;
+               }
+               
+               /// <summary>Return true if matches are required to be in-order.</summary>
+               public virtual bool IsInOrder()
+               {
+                       return inOrder;
+               }
+               
+               public override System.String GetField()
+               {
+                       return field;
+               }
+               
+               /// <summary>Returns a collection of all terms matched by this query.</summary>
+               /// <deprecated> use extractTerms instead
+               /// </deprecated>
+               /// <seealso cref="ExtractTerms(System.Collections.Hashtable)">
+               /// </seealso>
+        [Obsolete("use ExtractTerms instead")]
+               public override System.Collections.ICollection GetTerms()
+               {
+                       System.Collections.ArrayList terms = new System.Collections.ArrayList();
+                       System.Collections.IEnumerator i = clauses.GetEnumerator();
+                       while (i.MoveNext())
+                       {
+                               SpanQuery clause = (SpanQuery) i.Current;
+                               terms.AddRange(clause.GetTerms());
+                       }
+                       return terms;
+               }
+               
+               public override void  ExtractTerms(System.Collections.Hashtable terms)
+               {
+            foreach (SpanQuery clause in clauses)
+            {
+                clause.ExtractTerms(terms);
+            }
+               }
+               
+               
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       buffer.Append("spanNear([");
+                       System.Collections.IEnumerator i = clauses.GetEnumerator();
+                       while (i.MoveNext())
+                       {
+                               SpanQuery clause = (SpanQuery) i.Current;
+                               buffer.Append(clause.ToString(field));
+                buffer.Append(", ");
+                       }
+            if (clauses.Count > 0) buffer.Length -= 2;
+                       buffer.Append("], ");
+                       buffer.Append(slop);
+                       buffer.Append(", ");
+                       buffer.Append(inOrder);
+                       buffer.Append(")");
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       return buffer.ToString();
+               }
+               
+               public override Spans GetSpans(IndexReader reader)
+               {
+                       if (clauses.Count == 0)
+                       // optimize 0-clause case
+                               return new SpanOrQuery(GetClauses()).GetSpans(reader);
+                       
+                       if (clauses.Count == 1)
+                       // optimize 1-clause case
+                               return ((SpanQuery) clauses[0]).GetSpans(reader);
+                       
+                       return inOrder?(Spans) new NearSpansOrdered(this, reader, collectPayloads):(Spans) new NearSpansUnordered(this, reader);
+               }
+               
+               public override Query Rewrite(IndexReader reader)
+               {
+                       SpanNearQuery clone = null;
+                       for (int i = 0; i < clauses.Count; i++)
+                       {
+                               SpanQuery c = (SpanQuery) clauses[i];
+                               SpanQuery query = (SpanQuery) c.Rewrite(reader);
+                               if (query != c)
+                               {
+                                       // clause rewrote: must clone
+                                       if (clone == null)
+                                               clone = (SpanNearQuery) this.Clone();
+                                       clone.clauses[i] = query;
+                               }
+                       }
+                       if (clone != null)
+                       {
+                               return clone; // some clauses rewrote
+                       }
+                       else
+                       {
+                               return this; // no clauses rewrote
+                       }
+               }
+               
+               public override System.Object Clone()
+               {
+                       int sz = clauses.Count;
+                       SpanQuery[] newClauses = new SpanQuery[sz];
+                       
+                       for (int i = 0; i < sz; i++)
+                       {
+                               SpanQuery clause = (SpanQuery) clauses[i];
+                               newClauses[i] = (SpanQuery) clause.Clone();
+                       }
+                       SpanNearQuery spanNearQuery = new SpanNearQuery(newClauses, slop, inOrder, collectPayloads); // preserve collectPayloads
+                       spanNearQuery.SetBoost(GetBoost());
+                       return spanNearQuery;
+               }
+               
+               /// <summary>Returns true iff <code>o</code> is equal to this. </summary>
+               public  override bool Equals(System.Object o)
+               {
+                       if (this == o)
+                               return true;
+                       if (!(o is SpanNearQuery))
+                               return false;
+                       
+                       SpanNearQuery spanNearQuery = (SpanNearQuery) o;
+                       
+                       if (inOrder != spanNearQuery.inOrder)
+                               return false;
+                       if (slop != spanNearQuery.slop)
+                               return false;
+                       if (clauses.Count != spanNearQuery.clauses.Count)
+                               return false;
+            System.Collections.IEnumerator iter1 = clauses.GetEnumerator();
+            System.Collections.IEnumerator iter2 = spanNearQuery.clauses.GetEnumerator();
+            while (iter1.MoveNext() && iter2.MoveNext())
+            {
+                SpanQuery item1 = (SpanQuery)iter1.Current;
+                SpanQuery item2 = (SpanQuery)iter2.Current;
+                if (!item1.Equals(item2))
+                    return false;
+            }
+                       
+                       return GetBoost() == spanNearQuery.GetBoost();
+               }
+               
+               public override int GetHashCode()
+               {
+                       long result = 0;
+            //mgarski: .NET hashes the ArrayList by reference, not by its contents,
+            // so start with result being the hash of the contents.
+            foreach (SpanQuery sq in clauses)
+            {
+                result += sq.GetHashCode();
+            }
+                       // Mix bits before folding in things like boost, since it could cancel the
+                       // last element of clauses.  This particular mix also serves to
+                       // differentiate SpanNearQuery hashcodes from others.
+                       result ^= ((result << 14) | (SupportClass.Number.URShift(result, 19))); // reversible
+                       result += System.Convert.ToInt32(GetBoost());
+                       result += slop;
+                       result ^= (inOrder ? (long) 0x99AFD3BD : 0);
+                       return (int) result;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanNotQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanNotQuery.cs
new file mode 100644 (file)
index 0000000..24401cd
--- /dev/null
@@ -0,0 +1,267 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+using Query = Mono.Lucene.Net.Search.Query;
+
+namespace Mono.Lucene.Net.Search.Spans
+{
+       
+       /// <summary>Removes matches which overlap with another SpanQuery. </summary>
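+       // A minimal usage sketch (illustrative only; include and exclude are hypothetical
+       // SpanQueries over the same field):
+       //   SpanQuery filtered = new SpanNotQuery(include, exclude);
+       //   // keeps matches of include that do not overlap any match of exclude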
+       [Serializable]
+       public class SpanNotQuery:SpanQuery, System.ICloneable
+       {
+               private class AnonymousClassSpans : Spans
+               {
+                       public AnonymousClassSpans(Mono.Lucene.Net.Index.IndexReader reader, SpanNotQuery enclosingInstance)
+                       {
+                               InitBlock(reader, enclosingInstance);
+                       }
+                       private void  InitBlock(Mono.Lucene.Net.Index.IndexReader reader, SpanNotQuery enclosingInstance)
+                       {
+                               this.reader = reader;
+                               this.enclosingInstance = enclosingInstance;
+                               includeSpans = Enclosing_Instance.include.GetSpans(reader);
+                               excludeSpans = Enclosing_Instance.exclude.GetSpans(reader);
+                               moreExclude = excludeSpans.Next();
+                       }
+                       private Mono.Lucene.Net.Index.IndexReader reader;
+                       private SpanNotQuery enclosingInstance;
+                       public SpanNotQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private Spans includeSpans;
+                       private bool moreInclude = true;
+                       
+                       private Spans excludeSpans;
+                       private bool moreExclude;
+                       
+                       public override bool Next()
+                       {
+                               if (moreInclude)
+                               // move to next include
+                                       moreInclude = includeSpans.Next();
+                               
+                               while (moreInclude && moreExclude)
+                               {
+                                       
+                                       if (includeSpans.Doc() > excludeSpans.Doc())
+                                       // skip exclude
+                                               moreExclude = excludeSpans.SkipTo(includeSpans.Doc());
+                                       
+                                       while (moreExclude && includeSpans.Doc() == excludeSpans.Doc() && excludeSpans.End() <= includeSpans.Start())
+                                       {
+                                               moreExclude = excludeSpans.Next(); // increment exclude
+                                       }
+                                       
+                                       if (!moreExclude || includeSpans.Doc() != excludeSpans.Doc() || includeSpans.End() <= excludeSpans.Start())
+                                               break; // we found a match
+                                       
+                                       moreInclude = includeSpans.Next(); // intersected: keep scanning
+                               }
+                               return moreInclude;
+                       }
+                       
+                       public override bool SkipTo(int target)
+                       {
+                               if (moreInclude)
+                               // skip include
+                                       moreInclude = includeSpans.SkipTo(target);
+                               
+                               if (!moreInclude)
+                                       return false;
+                               
+                               if (moreExclude && includeSpans.Doc() > excludeSpans.Doc())
+                                       moreExclude = excludeSpans.SkipTo(includeSpans.Doc());
+                               
+                               while (moreExclude && includeSpans.Doc() == excludeSpans.Doc() && excludeSpans.End() <= includeSpans.Start())
+                               {
+                                       moreExclude = excludeSpans.Next(); // increment exclude
+                               }
+                               
+                               if (!moreExclude || includeSpans.Doc() != excludeSpans.Doc() || includeSpans.End() <= excludeSpans.Start())
+                                       return true; // we found a match
+                               
+                               return Next(); // scan to next match
+                       }
+                       
+                       public override int Doc()
+                       {
+                               return includeSpans.Doc();
+                       }
+                       public override int Start()
+                       {
+                               return includeSpans.Start();
+                       }
+                       public override int End()
+                       {
+                               return includeSpans.End();
+                       }
+                       
+                       // TODO: Remove warning after API has been finalized
+                       public override System.Collections.Generic.ICollection<byte[]> GetPayload()
+                       {
+                               System.Collections.Generic.ICollection<byte[]> result = null;
+                               if (includeSpans.IsPayloadAvailable())
+                               {
+                                       result = includeSpans.GetPayload();
+                               }
+                               return result;
+                       }
+                       
+                       // TODO: Remove warning after API has been finalized
+                       public override bool IsPayloadAvailable()
+                       {
+                               return includeSpans.IsPayloadAvailable();
+                       }
+                       
+                       public override System.String ToString()
+                       {
+                               return "spans(" + Enclosing_Instance.ToString() + ")";
+                       }
+               }
+               private SpanQuery include;
+               private SpanQuery exclude;
+               
+               /// <summary>Construct a SpanNotQuery matching spans from <code>include</code> which
+               /// have no overlap with spans from <code>exclude</code>.
+               /// </summary>
+               public SpanNotQuery(SpanQuery include, SpanQuery exclude)
+               {
+                       this.include = include;
+                       this.exclude = exclude;
+                       
+                       if (!include.GetField().Equals(exclude.GetField()))
+                               throw new System.ArgumentException("Clauses must have same field.");
+               }
+               
+               /// <summary>Return the SpanQuery whose matches are filtered. </summary>
+               public virtual SpanQuery GetInclude()
+               {
+                       return include;
+               }
+               
+               /// <summary>Return the SpanQuery whose matches must not overlap those returned. </summary>
+               public virtual SpanQuery GetExclude()
+               {
+                       return exclude;
+               }
+               
+               public override System.String GetField()
+               {
+                       return include.GetField();
+               }
+               
+               /// <summary>Returns a collection of all terms matched by this query.</summary>
+               /// <deprecated> use extractTerms instead
+               /// </deprecated>
+               /// <seealso cref="ExtractTerms(System.Collections.Hashtable)">
+               /// </seealso>
+        [Obsolete("use ExtractTerms instead")]
+               public override System.Collections.ICollection GetTerms()
+               {
+                       return include.GetTerms();
+               }
+               
+               public override void  ExtractTerms(System.Collections.Hashtable terms)
+               {
+                       include.ExtractTerms(terms);
+               }
+               
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       buffer.Append("spanNot(");
+                       buffer.Append(include.ToString(field));
+                       buffer.Append(", ");
+                       buffer.Append(exclude.ToString(field));
+                       buffer.Append(")");
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       return buffer.ToString();
+               }
+               
+               public override System.Object Clone()
+               {
+                       SpanNotQuery spanNotQuery = new SpanNotQuery((SpanQuery) include.Clone(), (SpanQuery) exclude.Clone());
+                       spanNotQuery.SetBoost(GetBoost());
+                       return spanNotQuery;
+               }
+               
+               public override Spans GetSpans(IndexReader reader)
+               {
+                       return new AnonymousClassSpans(reader, this);
+               }
+               
+               public override Query Rewrite(IndexReader reader)
+               {
+                       SpanNotQuery clone = null;
+                       
+                       SpanQuery rewrittenInclude = (SpanQuery) include.Rewrite(reader);
+                       if (rewrittenInclude != include)
+                       {
+                               clone = (SpanNotQuery) this.Clone();
+                               clone.include = rewrittenInclude;
+                       }
+                       SpanQuery rewrittenExclude = (SpanQuery) exclude.Rewrite(reader);
+                       if (rewrittenExclude != exclude)
+                       {
+                               if (clone == null)
+                                       clone = (SpanNotQuery) this.Clone();
+                               clone.exclude = rewrittenExclude;
+                       }
+                       
+                       if (clone != null)
+                       {
+                               return clone; // some clauses rewrote
+                       }
+                       else
+                       {
+                               return this; // no clauses rewrote
+                       }
+               }
+               
+               /// <summary>Returns true iff <code>o</code> is equal to this. </summary>
+               public  override bool Equals(System.Object o)
+               {
+                       if (this == o)
+                               return true;
+                       if (!(o is SpanNotQuery))
+                               return false;
+                       
+                       SpanNotQuery other = (SpanNotQuery) o;
+                       return this.include.Equals(other.include) && this.exclude.Equals(other.exclude) && this.GetBoost() == other.GetBoost();
+               }
+               
+               public override int GetHashCode()
+               {
+                       int h = include.GetHashCode();
+                       h = (h << 1) | (SupportClass.Number.URShift(h, 31)); // rotate left
+                       h ^= exclude.GetHashCode();
+                       h = (h << 1) | (SupportClass.Number.URShift(h, 31)); // rotate left
+                       h ^= System.Convert.ToInt32(GetBoost());
+                       return h;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanOrQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanOrQuery.cs
new file mode 100644 (file)
index 0000000..6fa0eef
--- /dev/null
@@ -0,0 +1,367 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using PriorityQueue = Mono.Lucene.Net.Util.PriorityQueue;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+using Query = Mono.Lucene.Net.Search.Query;
+
+namespace Mono.Lucene.Net.Search.Spans
+{
+       
+       /// <summary>Matches the union of its clauses.</summary>
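+       // A minimal usage sketch (illustrative only; a and b are hypothetical SpanQueries
+       // over the same field):
+       //   SpanQuery either = new SpanOrQuery(new SpanQuery[] { a, b });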
+       [Serializable]
+       public class SpanOrQuery:SpanQuery, System.ICloneable
+       {
+               private class AnonymousClassSpans : Spans
+               {
+                       public AnonymousClassSpans(Mono.Lucene.Net.Index.IndexReader reader, SpanOrQuery enclosingInstance)
+                       {
+                               InitBlock(reader, enclosingInstance);
+                       }
+                       private void  InitBlock(Mono.Lucene.Net.Index.IndexReader reader, SpanOrQuery enclosingInstance)
+                       {
+                               this.reader = reader;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private Mono.Lucene.Net.Index.IndexReader reader;
+                       private SpanOrQuery enclosingInstance;
+                       public SpanOrQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private SpanQueue queue = null;
+                       
+                       private bool InitSpanQueue(int target)
+                       {
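+                               // position each clause: Next() when target == -1, otherwise SkipTo(target);
+                               // only clauses that still have a match enter the queue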
+                               queue = new SpanQueue(enclosingInstance, Enclosing_Instance.clauses.Count);
+                               System.Collections.IEnumerator i = Enclosing_Instance.clauses.GetEnumerator();
+                               while (i.MoveNext())
+                               {
+                                       Spans spans = ((SpanQuery) i.Current).GetSpans(reader);
+                                       if (((target == - 1) && spans.Next()) || ((target != - 1) && spans.SkipTo(target)))
+                                       {
+                                               queue.Put(spans);
+                                       }
+                               }
+                               return queue.Size() != 0;
+                       }
+                       
+                       public override bool Next()
+                       {
+                               if (queue == null)
+                               {
+                                       return InitSpanQueue(- 1);
+                               }
+                               
+                               if (queue.Size() == 0)
+                               {
+                                       // all done
+                                       return false;
+                               }
+                               
+                               if (Top().Next())
+                               {
+                                       // move to next
+                                       queue.AdjustTop();
+                                       return true;
+                               }
+                               
+                               queue.Pop(); // exhausted a clause
+                               return queue.Size() != 0;
+                       }
+                       
+                       private Spans Top()
+                       {
+                               return (Spans) queue.Top();
+                       }
+                       
+                       public override bool SkipTo(int target)
+                       {
+                               if (queue == null)
+                               {
+                                       return InitSpanQueue(target);
+                               }
+                               
+                               bool skipCalled = false;
+                               while (queue.Size() != 0 && Top().Doc() < target)
+                               {
+                                       if (Top().SkipTo(target))
+                                       {
+                                               queue.AdjustTop();
+                                       }
+                                       else
+                                       {
+                                               queue.Pop();
+                                       }
+                                       skipCalled = true;
+                               }
+                               
+                               if (skipCalled)
+                               {
+                                       return queue.Size() != 0;
+                               }
+                               return Next();
+                       }
+                       
+                       public override int Doc()
+                       {
+                               return Top().Doc();
+                       }
+                       public override int Start()
+                       {
+                               return Top().Start();
+                       }
+                       public override int End()
+                       {
+                               return Top().End();
+                       }
+                       
+                       // TODO: Remove warning after API has been finalized
+                       public override System.Collections.Generic.ICollection<byte[]> GetPayload()
+                       {
+                               System.Collections.Generic.ICollection<byte[]> result = null;
+                               Spans theTop = Top();
+                               if (theTop != null && theTop.IsPayloadAvailable())
+                               {
+                                       result = theTop.GetPayload();
+                               }
+                               return result;
+                       }
+                       
+                       // TODO: Remove warning after API has been finalized
+                       public override bool IsPayloadAvailable()
+                       {
+                               Spans top = Top();
+                               return top != null && top.IsPayloadAvailable();
+                       }
+                       
+                       public override System.String ToString()
+                       {
+                               return "spans(" + Enclosing_Instance + ")@" + ((queue == null)?"START":(queue.Size() > 0?(Doc() + ":" + Start() + "-" + End()):"END"));
+                       }
+               }
+               private SupportClass.EquatableList<SpanQuery> clauses;
+               private System.String field;
+               
+               /// <summary>Construct a SpanOrQuery merging the provided clauses. </summary>
+               public SpanOrQuery(SpanQuery[] clauses)
+               {
+                       
+                       // copy clauses array into an ArrayList
+                       this.clauses = new SupportClass.EquatableList<SpanQuery>(clauses.Length);
+                       for (int i = 0; i < clauses.Length; i++)
+                       {
+                               SpanQuery clause = clauses[i];
+                               if (i == 0)
+                               {
+                                       // check field
+                                       field = clause.GetField();
+                               }
+                               else if (!clause.GetField().Equals(field))
+                               {
+                                       throw new System.ArgumentException("Clauses must have same field.");
+                               }
+                               this.clauses.Add(clause);
+                       }
+               }
+               
+               /// <summary>Return the clauses whose spans are matched. </summary>
+               public virtual SpanQuery[] GetClauses()
+               {
+                       return (SpanQuery[]) clauses.ToArray();
+               }
+               
+               public override System.String GetField()
+               {
+                       return field;
+               }
+               
+               /// <summary>Returns a collection of all terms matched by this query.</summary>
+               /// <deprecated> use extractTerms instead
+               /// </deprecated>
+               /// <seealso cref="ExtractTerms(System.Collections.Hashtable)">
+               /// </seealso>
+        [Obsolete("use ExtractTerms instead")]
+               public override System.Collections.ICollection GetTerms()
+               {
+                       System.Collections.ArrayList terms = new System.Collections.ArrayList();
+                       System.Collections.IEnumerator i = clauses.GetEnumerator();
+                       while (i.MoveNext())
+                       {
+                               SpanQuery clause = (SpanQuery) i.Current;
+                               terms.AddRange(clause.GetTerms());
+                       }
+                       return terms;
+               }
+               
+               public override void  ExtractTerms(System.Collections.Hashtable terms)
+               {
+                       System.Collections.IEnumerator i = clauses.GetEnumerator();
+                       while (i.MoveNext())
+                       {
+                               SpanQuery clause = (SpanQuery) i.Current;
+                               clause.ExtractTerms(terms);
+                       }
+               }
+               
+               public override System.Object Clone()
+               {
+                       int sz = clauses.Count;
+                       SpanQuery[] newClauses = new SpanQuery[sz];
+                       
+                       for (int i = 0; i < sz; i++)
+                       {
+                               SpanQuery clause = (SpanQuery) clauses[i];
+                               newClauses[i] = (SpanQuery) clause.Clone();
+                       }
+                       SpanOrQuery soq = new SpanOrQuery(newClauses);
+                       soq.SetBoost(GetBoost());
+                       return soq;
+               }
+               
+               public override Query Rewrite(IndexReader reader)
+               {
+                       SpanOrQuery clone = null;
+                       for (int i = 0; i < clauses.Count; i++)
+                       {
+                               SpanQuery c = (SpanQuery) clauses[i];
+                               SpanQuery query = (SpanQuery) c.Rewrite(reader);
+                               if (query != c)
+                               {
+                                       // clause rewrote: must clone
+                                       if (clone == null)
+                                               clone = (SpanOrQuery) this.Clone();
+                                       clone.clauses[i] = query;
+                               }
+                       }
+                       if (clone != null)
+                       {
+                               return clone; // some clauses rewrote
+                       }
+                       else
+                       {
+                               return this; // no clauses rewrote
+                       }
+               }
+               
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       buffer.Append("spanOr([");
+                       System.Collections.IEnumerator i = clauses.GetEnumerator();
+            int j = 0;
+                       while (i.MoveNext())
+                       {
+                j++;
+                               SpanQuery clause = (SpanQuery) i.Current;
+                               buffer.Append(clause.ToString(field));
+                if (j < clauses.Count)
+                {
+                    buffer.Append(", ");
+                }
+                       }
+                       buffer.Append("])");
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       return buffer.ToString();
+               }
+               
+               public override bool Equals(System.Object o)
+               {
+                       if (this == o)
+                               return true;
+                       if (o == null || GetType() != o.GetType())
+                               return false;
+                       
+                       SpanOrQuery that = (SpanOrQuery) o;
+                       
+                       if (!clauses.Equals(that.clauses))
+                               return false;
+                       if (clauses.Count != 0 && !field.Equals(that.field))
+                               return false;
+                       
+                       return GetBoost() == that.GetBoost();
+               }
+               
+               public override int GetHashCode()
+               {
+                       int h = clauses.GetHashCode();
+                       h ^= ((h << 10) | (SupportClass.Number.URShift(h, 23)));
+                       h ^= System.Convert.ToInt32(GetBoost());
+                       return h;
+               }
+               
+               
+               private class SpanQueue:PriorityQueue
+               {
+                       private void InitBlock(SpanOrQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private SpanOrQuery enclosingInstance;
+                       public SpanOrQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       public SpanQueue(SpanOrQuery enclosingInstance, int size)
+                       {
+                               InitBlock(enclosingInstance);
+                               Initialize(size);
+                       }
+                       
+                       public override bool LessThan(System.Object o1, System.Object o2)
+                       {
+                               Spans spans1 = (Spans) o1;
+                               Spans spans2 = (Spans) o2;
+                               if (spans1.Doc() == spans2.Doc())
+                               {
+                                       if (spans1.Start() == spans2.Start())
+                                       {
+                                               return spans1.End() < spans2.End();
+                                       }
+                                       else
+                                       {
+                                               return spans1.Start() < spans2.Start();
+                                       }
+                               }
+                               else
+                               {
+                                       return spans1.Doc() < spans2.Doc();
+                               }
+                       }
+               }
+               
+               public override Spans GetSpans(IndexReader reader)
+               {
+                       // optimize 1-clause case
+                       if (clauses.Count == 1)
+                               return ((SpanQuery) clauses[0]).GetSpans(reader);
+                       
+                       return new AnonymousClassSpans(reader, this);
+               }
+       }
+}
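
A minimal sketch of composing the class above from term clauses; the IndexReader is assumed to be opened elsewhere, and the field and terms are invented:

    using System;
    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Search.Spans;

    static class SpanOrQueryExample
    {
        // Enumerates spans matching either term in the "content" field.
        public static void Dump(IndexReader reader)
        {
            SpanQuery[] clauses = new SpanQuery[] {
                new SpanTermQuery(new Term("content", "monkey")),
                new SpanTermQuery(new Term("content", "doc"))
            };
            SpanOrQuery query = new SpanOrQuery(clauses);
            Console.WriteLine(query.ToString("content")); // spanOr([monkey, doc])

            Spans spans = query.GetSpans(reader);
            while (spans.Next()) // ordered by doc, then start, then end
                Console.WriteLine("doc={0} span=[{1},{2})", spans.Doc(), spans.Start(), spans.End());
        }
    }
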
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanQuery.cs
new file mode 100644 (file)
index 0000000..017aef9
--- /dev/null
@@ -0,0 +1,53 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Query = Mono.Lucene.Net.Search.Query;
+using Searcher = Mono.Lucene.Net.Search.Searcher;
+using Weight = Mono.Lucene.Net.Search.Weight;
+
+namespace Mono.Lucene.Net.Search.Spans
+{
+       
+       /// <summary>Base class for span-based queries. </summary>
+       [Serializable]
+       public abstract class SpanQuery:Query
+       {
+               /// <summary>Expert: Returns the matches for this query in an index.  Used internally
+               /// to search for spans. 
+               /// </summary>
+               public abstract Spans GetSpans(IndexReader reader);
+               
+               /// <summary>Returns the name of the field matched by this query.</summary>
+               public abstract System.String GetField();
+               
+               /// <summary>Returns a collection of all terms matched by this query.</summary>
+               /// <deprecated> use ExtractTerms instead
+               /// </deprecated>
+               /// <seealso cref="Query.ExtractTerms(System.Collections.Hashtable)">
+               /// </seealso>
+        [Obsolete("use ExtractTerms instead")]
+               public abstract System.Collections.ICollection GetTerms();
+               
+               public override Weight CreateWeight(Searcher searcher)
+               {
+                       return new SpanWeight(this, searcher);
+               }
+       }
+}
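
Because CreateWeight above returns a SpanWeight, a concrete SpanQuery runs through the ordinary search entry points. A hedged sketch, assuming the standard Searcher.Search(Query, int) surface and using the SpanTermQuery added later in this diff; the index contents and names are invented:

    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Search;
    using Mono.Lucene.Net.Search.Spans;

    static class SpanSearchExample
    {
        public static TopDocs Run(IndexReader reader)
        {
            SpanQuery query = new SpanTermQuery(new Term("body", "mono"));
            IndexSearcher searcher = new IndexSearcher(reader);
            return searcher.Search(query, 10); // top 10 hits, scored via SpanWeight/SpanScorer
        }
    }
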
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanScorer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanScorer.cs
new file mode 100644 (file)
index 0000000..4b1c06f
--- /dev/null
@@ -0,0 +1,155 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Explanation = Mono.Lucene.Net.Search.Explanation;
+using Scorer = Mono.Lucene.Net.Search.Scorer;
+using Similarity = Mono.Lucene.Net.Search.Similarity;
+using Weight = Mono.Lucene.Net.Search.Weight;
+
+namespace Mono.Lucene.Net.Search.Spans
+{
+       
+       /// <summary> Public for extension only.</summary>
+       public class SpanScorer:Scorer
+       {
+               protected internal Spans spans;
+               protected internal Weight weight;
+               protected internal byte[] norms;
+               protected internal float value_Renamed;
+               
+               /// <deprecated> not needed anymore 
+               /// </deprecated>
+        [Obsolete("not needed anymore")]
+               protected internal bool firstTime = true;
+               protected internal bool more = true;
+               
+               protected internal int doc;
+               protected internal float freq;
+               
+               protected internal SpanScorer(Spans spans, Weight weight, Similarity similarity, byte[] norms):base(similarity)
+               {
+                       this.spans = spans;
+                       this.norms = norms;
+                       this.weight = weight;
+                       this.value_Renamed = weight.GetValue();
+                       if (this.spans.Next())
+                       {
+                               doc = -1;
+                       }
+                       else
+                       {
+                               doc = NO_MORE_DOCS;
+                               more = false;
+                       }
+               }
+               
+               /// <deprecated> use {@link #NextDoc()} instead. 
+               /// </deprecated>
+        [Obsolete("use NextDoc() instead.")]
+               public override bool Next()
+               {
+                       return NextDoc() != NO_MORE_DOCS;
+               }
+               
+               public override int NextDoc()
+               {
+                       if (!SetFreqCurrentDoc())
+                       {
+                               doc = NO_MORE_DOCS;
+                       }
+                       return doc;
+               }
+               
+               /// <deprecated> use {@link #Advance(int)} instead. 
+               /// </deprecated>
+        [Obsolete("use Advance(int) instead.")]
+               public override bool SkipTo(int target)
+               {
+                       return Advance(target) != NO_MORE_DOCS;
+               }
+               
+               public override int Advance(int target)
+               {
+                       if (!more)
+                       {
+                               return doc = NO_MORE_DOCS;
+                       }
+                       if (spans.Doc() < target)
+                       {
+                               // setFreqCurrentDoc() leaves spans.doc() ahead
+                               more = spans.SkipTo(target);
+                       }
+                       if (!SetFreqCurrentDoc())
+                       {
+                               doc = NO_MORE_DOCS;
+                       }
+                       return doc;
+               }
+               
+               public /*protected internal*/ virtual bool SetFreqCurrentDoc()
+               {
+                       if (!more)
+                       {
+                               return false;
+                       }
+                       doc = spans.Doc();
+                       freq = 0.0f;
+                       do 
+                       {
+                               int matchLength = spans.End() - spans.Start();
+                               freq += GetSimilarity().SloppyFreq(matchLength);
+                               more = spans.Next();
+                       }
+                       while (more && (doc == spans.Doc()));
+                       return true;
+               }
+               
+               /// <deprecated> use {@link #DocID()} instead. 
+               /// </deprecated>
+        [Obsolete("use DocID() instead.")]
+               public override int Doc()
+               {
+                       return doc;
+               }
+               
+               public override int DocID()
+               {
+                       return doc;
+               }
+               
+               public override float Score()
+               {
+                       float raw = GetSimilarity().Tf(freq) * value_Renamed; // raw score
+                       return norms == null ? raw : raw * Similarity.DecodeNorm(norms[doc]); // normalize
+               }
+               
+               public override Explanation Explain(int doc)
+               {
+                       Explanation tfExplanation = new Explanation();
+                       
+                       int expDoc = Advance(doc);
+                       
+                       float phraseFreq = (expDoc == doc) ? freq : 0.0f;
+                       tfExplanation.SetValue(GetSimilarity().Tf(phraseFreq));
+                       tfExplanation.SetDescription("tf(phraseFreq=" + phraseFreq + ")");
+                       
+                       return tfExplanation;
+               }
+       }
+}
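
SetFreqCurrentDoc above folds every span in the current document into a single freq via Similarity.SloppyFreq, and Score then applies Tf. A rough numeric sketch, assuming DefaultSimilarity (SloppyFreq(d) = 1/(d + 1), Tf(f) = sqrt(f)) and invented span lengths:

    using System;

    static class SpanScoreSketch
    {
        // Mirrors the freq accumulation in SetFreqCurrentDoc and the raw score in Score().
        public static float RawScore(int[] matchLengths, float weightValue)
        {
            float freq = 0.0f;
            foreach (int matchLength in matchLengths)
                freq += 1.0f / (matchLength + 1); // DefaultSimilarity.SloppyFreq
            return (float) Math.Sqrt(freq) * weightValue; // Tf(freq) * value
        }
    }

    // RawScore(new int[] { 1, 3 }, 0.5f) == sqrt(0.5 + 0.25) * 0.5 ≈ 0.43
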
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanTermQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanTermQuery.cs
new file mode 100644 (file)
index 0000000..0abe275
--- /dev/null
@@ -0,0 +1,112 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+
+namespace Mono.Lucene.Net.Search.Spans
+{
+       
+       /// <summary>Matches spans containing a term. </summary>
+       [Serializable]
+       public class SpanTermQuery:SpanQuery
+       {
+               protected internal Term term;
+               
+               /// <summary>Construct a SpanTermQuery matching the named term's spans. </summary>
+               public SpanTermQuery(Term term)
+               {
+                       this.term = term;
+               }
+               
+               /// <summary>Return the term whose spans are matched. </summary>
+               public virtual Term GetTerm()
+               {
+                       return term;
+               }
+               
+               public override System.String GetField()
+               {
+                       return term.Field();
+               }
+               
+               /// <summary>Returns a collection of all terms matched by this query.</summary>
+               /// <deprecated> use ExtractTerms instead
+               /// </deprecated>
+               /// <seealso cref="ExtractTerms(System.Collections.Hashtable)">
+               /// </seealso>
+        [Obsolete("use ExtractTerms instead")]
+               public override System.Collections.ICollection GetTerms()
+               {
+                       System.Collections.ArrayList terms = new System.Collections.ArrayList();
+                       terms.Add(term);
+                       return terms;
+               }
+               
+               public override void ExtractTerms(System.Collections.Hashtable terms)
+               {
+                       SupportClass.CollectionsHelper.AddIfNotContains(terms, term);
+               }
+               
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       if (term.Field().Equals(field))
+                               buffer.Append(term.Text());
+                       else
+                       {
+                               buffer.Append(term.ToString());
+                       }
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       return buffer.ToString();
+               }
+               
+               public override int GetHashCode()
+               {
+                       int prime = 31;
+                       int result = base.GetHashCode();
+                       result = prime * result + ((term == null) ? 0 : term.GetHashCode());
+                       return result;
+               }
+               
+               public override bool Equals(System.Object obj)
+               {
+                       if (this == obj)
+                               return true;
+                       if (!base.Equals(obj))
+                               return false;
+                       if (GetType() != obj.GetType())
+                               return false;
+                       SpanTermQuery other = (SpanTermQuery) obj;
+                       if (term == null)
+                       {
+                               if (other.term != null)
+                                       return false;
+                       }
+                       else if (!term.Equals(other.term))
+                               return false;
+                       return true;
+               }
+               
+               public override Spans GetSpans(IndexReader reader)
+               {
+                       return new TermSpans(reader.TermPositions(term), term);
+               }
+       }
+}
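
A short sketch of the ExtractTerms path that replaces the deprecated GetTerms above; the field and text are invented:

    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Search.Spans;

    static class ExtractTermsExample
    {
        public static System.Collections.Hashtable Collect(SpanQuery query)
        {
            System.Collections.Hashtable terms = new System.Collections.Hashtable();
            query.ExtractTerms(terms); // AddIfNotContains keeps each Term at most once
            return terms;
        }
        // Collect(new SpanTermQuery(new Term("field", "text"))) yields a single entry.
    }
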
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanWeight.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/SpanWeight.cs
new file mode 100644 (file)
index 0000000..85f7a17
--- /dev/null
@@ -0,0 +1,134 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Mono.Lucene.Net.Search;
+using IDFExplanation = Mono.Lucene.Net.Search.Explanation.IDFExplanation;
+
+namespace Mono.Lucene.Net.Search.Spans
+{
+       
+       /// <summary> Expert-only.  Public for use by other weight implementations</summary>
+       [Serializable]
+       public class SpanWeight:Weight
+       {
+               protected internal Similarity similarity;
+               protected internal float value_Renamed;
+               protected internal float idf;
+               protected internal float queryNorm;
+               protected internal float queryWeight;
+               
+               protected internal System.Collections.Hashtable terms;
+               protected internal SpanQuery query;
+               private IDFExplanation idfExp;
+               
+               public SpanWeight(SpanQuery query, Searcher searcher)
+               {
+                       this.similarity = query.GetSimilarity(searcher);
+                       this.query = query;
+                       terms = new System.Collections.Hashtable();
+                       query.ExtractTerms(terms);
+                       idfExp = similarity.idfExplain(new System.Collections.ArrayList(terms.Values), searcher);
+                       idf = idfExp.GetIdf();
+               }
+               
+               public override Query GetQuery()
+               {
+                       return query;
+               }
+               
+               public override float GetValue()
+               {
+                       return value_Renamed;
+               }
+               
+               public override float SumOfSquaredWeights()
+               {
+                       queryWeight = idf * query.GetBoost(); // compute query weight
+                       return queryWeight * queryWeight; // square it
+               }
+               
+               public override void Normalize(float queryNorm)
+               {
+                       this.queryNorm = queryNorm;
+                       queryWeight *= queryNorm; // normalize query weight
+                       value_Renamed = queryWeight * idf; // idf for document
+               }
+               
+               public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
+               {
+                       return new SpanScorer(query.GetSpans(reader), this, similarity, reader.Norms(query.GetField()));
+               }
+               
+               public override Explanation Explain(IndexReader reader, int doc)
+               {
+                       
+                       ComplexExplanation result = new ComplexExplanation();
+                       result.SetDescription("weight(" + GetQuery() + " in " + doc + "), product of:");
+                       System.String field = ((SpanQuery) GetQuery()).GetField();
+                       
+                       Explanation idfExpl = new Explanation(idf, "idf(" + field + ": " + idfExp.Explain() + ")");
+                       
+                       // explain query weight
+                       Explanation queryExpl = new Explanation();
+                       queryExpl.SetDescription("queryWeight(" + GetQuery() + "), product of:");
+                       
+                       Explanation boostExpl = new Explanation(GetQuery().GetBoost(), "boost");
+                       if (GetQuery().GetBoost() != 1.0f)
+                               queryExpl.AddDetail(boostExpl);
+                       queryExpl.AddDetail(idfExpl);
+                       
+                       Explanation queryNormExpl = new Explanation(queryNorm, "queryNorm");
+                       queryExpl.AddDetail(queryNormExpl);
+                       
+                       queryExpl.SetValue(boostExpl.GetValue() * idfExpl.GetValue() * queryNormExpl.GetValue());
+                       
+                       result.AddDetail(queryExpl);
+                       
+                       // explain field weight
+                       ComplexExplanation fieldExpl = new ComplexExplanation();
+                       fieldExpl.SetDescription("fieldWeight(" + field + ":" + query.ToString(field) + " in " + doc + "), product of:");
+                       
+                       Explanation tfExpl = Scorer(reader, true, false).Explain(doc);
+                       fieldExpl.AddDetail(tfExpl);
+                       fieldExpl.AddDetail(idfExpl);
+                       
+                       Explanation fieldNormExpl = new Explanation();
+                       byte[] fieldNorms = reader.Norms(field);
+                       float fieldNorm = fieldNorms != null ? Similarity.DecodeNorm(fieldNorms[doc]) : 1.0f;
+                       fieldNormExpl.SetValue(fieldNorm);
+                       fieldNormExpl.SetDescription("fieldNorm(field=" + field + ", doc=" + doc + ")");
+                       fieldExpl.AddDetail(fieldNormExpl);
+                       
+                       fieldExpl.SetMatch(tfExpl.IsMatch());
+                       fieldExpl.SetValue(tfExpl.GetValue() * idfExpl.GetValue() * fieldNormExpl.GetValue());
+                       
+                       result.AddDetail(fieldExpl);
+                       System.Boolean? tempAux = fieldExpl.GetMatch();
+                       result.SetMatch(tempAux);
+                       
+                       // combine them
+                       result.SetValue(queryExpl.GetValue() * fieldExpl.GetValue());
+                       
+                       if (queryExpl.GetValue() == 1.0f)
+                               return fieldExpl;
+                       
+                       return result;
+               }
+       }
+}
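
For reference, the handshake between a Searcher and the weight above, written out numerically; the idf and boost values are invented, and 1/sqrt(sum) is what DefaultSimilarity.QueryNorm would supply:

    static class SpanWeightSketch
    {
        public static float Normalized(float idf, float boost)
        {
            float queryWeight = idf * boost;                         // SumOfSquaredWeights() stores this...
            float sum = queryWeight * queryWeight;                   // ...and returns its square
            float queryNorm = (float) (1.0 / System.Math.Sqrt(sum)); // Searcher side
            queryWeight *= queryNorm;                                // Normalize(queryNorm)
            return queryWeight * idf;                                // GetValue(): per-document multiplier
        }
    }
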
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/Spans.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/Spans.cs
new file mode 100644 (file)
index 0000000..14959e7
--- /dev/null
@@ -0,0 +1,92 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search.Spans
+{
+       
+       /// <summary>Expert: an enumeration of span matches.  Used to implement span searching.
+       /// Each span represents a range of term positions within a document.  Matches
+       /// are enumerated in order, by increasing document number, within that by
+       /// increasing start position and finally by increasing end position. 
+       /// </summary>
+       public abstract class Spans
+       {
+               /// <summary>Move to the next match, returning true iff any such exists. </summary>
+               public abstract bool Next();
+               
+               /// <summary>Skips to the first match beyond the current, whose document number is
+               /// greater than or equal to <i>target</i>. <p/>Returns true iff there is such
+               /// a match.  <p/>Behaves as if written: <pre>
+               /// bool SkipTo(int target) {
+               ///     do {
+               ///         if (!Next())
+               ///             return false;
+               ///     } while (target > Doc());
+               ///     return true;
+               /// }
+               /// </pre>
+               /// Most implementations are considerably more efficient than that.
+               /// </summary>
+               public abstract bool SkipTo(int target);
+               
+               /// <summary>Returns the document number of the current match.  Initially invalid. </summary>
+               public abstract int Doc();
+               
+               /// <summary>Returns the start position of the current match.  Initially invalid. </summary>
+               public abstract int Start();
+               
+               /// <summary>Returns the end position of the current match.  Initially invalid. </summary>
+               public abstract int End();
+               
+               /// <summary> Returns the payload data for the current span.
+               /// This is invalid until {@link #Next()} is called for
+               /// the first time.
+               /// This method must not be called more than once after each call
+               /// of {@link #Next()}. However, most payloads are loaded lazily,
+               /// so if the payload data for the current position is not needed,
+               /// this method may not be called at all for performance reasons. An ordered
+               /// SpanQuery does not lazy load, so if you have payloads in your index and
+               /// you do not want ordered SpanNearQuerys to collect payloads, you can
+               /// disable collection with a constructor option.<br/>
+               /// 
+               /// Note that the return type is a collection, thus the ordering should not be relied upon.
+               /// <br/>
+               /// <p/><font color="#FF0000">
+               /// WARNING: The status of the <b>Payloads</b> feature is experimental.
+               /// The APIs introduced here might change in the future and will not be
+               /// supported anymore in such a case.</font><p/>
+               /// 
+               /// </summary>
+               /// <returns> a collection of byte arrays containing the data of this payload, or null if IsPayloadAvailable() is false
+               /// </returns>
+               /// <throws>  System.IO.IOException </throws>
+               // TODO: Remove warning after API has been finalized
+               public abstract System.Collections.Generic.ICollection<byte[]> GetPayload();
+               
+               /// <summary> Checks if a payload can be loaded at this position.
+               /// <p/>
+               /// Payloads can only be loaded once per call to
+               /// {@link #Next()}.
+               /// 
+               /// </summary>
+               /// <returns> true if there is a payload available at this position that can be loaded
+               /// </returns>
+               public abstract bool IsPayloadAvailable();
+       }
+}
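
The payload contract documented above, as a consumption sketch; the Spans instance would come from some SpanQuery.GetSpans call, and ProcessPayload is a hypothetical callback:

    using Mono.Lucene.Net.Search.Spans;

    static class PayloadWalk
    {
        public static void Walk(Spans spans)
        {
            while (spans.Next())
            {
                if (spans.IsPayloadAvailable()) // payloads load lazily; check first
                {
                    // GetPayload() may be called at most once per Next()
                    foreach (byte[] payload in spans.GetPayload())
                        ProcessPayload(payload);
                }
            }
        }

        static void ProcessPayload(byte[] data) { /* hypothetical consumer */ }
    }
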
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/TermSpans.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Spans/TermSpans.cs
new file mode 100644 (file)
index 0000000..a774f0d
--- /dev/null
@@ -0,0 +1,125 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Term = Mono.Lucene.Net.Index.Term;
+using TermPositions = Mono.Lucene.Net.Index.TermPositions;
+
+namespace Mono.Lucene.Net.Search.Spans
+{
+       
+       /// <summary> Expert:
+       /// Public for extension only
+       /// </summary>
+       public class TermSpans:Spans
+       {
+               protected internal TermPositions positions;
+               protected internal Term term;
+               protected internal int doc;
+               protected internal int freq;
+               protected internal int count;
+               protected internal int position;
+               
+               
+               public TermSpans(TermPositions positions, Term term)
+               {
+                       this.positions = positions;
+                       this.term = term;
+                       doc = -1;
+               }
+               
+               public override bool Next()
+               {
+                       if (count == freq)
+                       {
+                               if (!positions.Next())
+                               {
+                                       doc = System.Int32.MaxValue;
+                                       return false;
+                               }
+                               doc = positions.Doc();
+                               freq = positions.Freq();
+                               count = 0;
+                       }
+                       position = positions.NextPosition();
+                       count++;
+                       return true;
+               }
+               
+               public override bool SkipTo(int target)
+               {
+                       if (!positions.SkipTo(target))
+                       {
+                               doc = System.Int32.MaxValue;
+                               return false;
+                       }
+                       
+                       doc = positions.Doc();
+                       freq = positions.Freq();
+                       count = 0;
+                       
+                       position = positions.NextPosition();
+                       count++;
+                       
+                       return true;
+               }
+               
+               public override int Doc()
+               {
+                       return doc;
+               }
+               
+               public override int Start()
+               {
+                       return position;
+               }
+               
+               public override int End()
+               {
+                       return position + 1;
+               }
+               
+               // TODO: Remove warning after API has been finalized
+               public override System.Collections.Generic.ICollection<byte[]> GetPayload()
+               {
+                       byte[] bytes = new byte[positions.GetPayloadLength()];
+                       bytes = positions.GetPayload(bytes, 0);
+                       System.Collections.Generic.List<byte[]> val = new System.Collections.Generic.List<byte[]>();
+                       val.Add(bytes);
+                       return val;
+               }
+               
+               // TODO: Remove warning after API has been finalized
+               public override bool IsPayloadAvailable()
+               {
+                       return positions.IsPayloadAvailable();
+               }
+               
+               public override System.String ToString()
+               {
+                       return "spans(" + term.ToString() + ")@" + (doc == -1 ? "START" : ((doc == System.Int32.MaxValue) ? "END" : doc + "-" + position));
+               }
+               
+               
+               public virtual TermPositions GetPositions()
+               {
+                       return positions;
+               }
+       }
+}
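
The three states ToString above can print, sketched with an invented term (Term.ToString renders as field:text; the document and position numbers depend on the index):

    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Search.Spans;

    static class TermSpansExample
    {
        public static void Show(IndexReader reader)
        {
            Term t = new Term("f", "x");
            TermSpans spans = new TermSpans(reader.TermPositions(t), t);
            System.Console.WriteLine(spans); // spans(f:x)@START
            while (spans.Next())
                System.Console.WriteLine(spans); // e.g. spans(f:x)@7-3
            System.Console.WriteLine(spans); // spans(f:x)@END
        }
    }
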
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TermQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TermQuery.cs
new file mode 100644 (file)
index 0000000..b8e4943
--- /dev/null
@@ -0,0 +1,213 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using TermDocs = Mono.Lucene.Net.Index.TermDocs;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+using IDFExplanation = Mono.Lucene.Net.Search.Explanation.IDFExplanation;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>A Query that matches documents containing a term.
+       /// This may be combined with other terms with a {@link BooleanQuery}.
+       /// </summary>
+       [Serializable]
+       public class TermQuery:Query
+       {
+               private Term term;
+               
+               [Serializable]
+               private class TermWeight:Weight
+               {
+                       private void InitBlock(TermQuery enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private TermQuery enclosingInstance;
+                       public TermQuery Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private Similarity similarity;
+                       private float value_Renamed;
+                       private float idf;
+                       private float queryNorm;
+                       private float queryWeight;
+                       private IDFExplanation idfExp;
+                       
+                       public TermWeight(TermQuery enclosingInstance, Searcher searcher)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.similarity = Enclosing_Instance.GetSimilarity(searcher);
+                               idfExp = similarity.IdfExplain(Enclosing_Instance.term, searcher);
+                               idf = idfExp.GetIdf();
+                       }
+                       
+                       public override System.String ToString()
+                       {
+                               return "weight(" + Enclosing_Instance + ")";
+                       }
+                       
+                       public override Query GetQuery()
+                       {
+                               return Enclosing_Instance;
+                       }
+                       
+                       public override float GetValue()
+                       {
+                               return value_Renamed;
+                       }
+                       
+                       public override float SumOfSquaredWeights()
+                       {
+                               queryWeight = idf * Enclosing_Instance.GetBoost(); // compute query weight
+                               return queryWeight * queryWeight; // square it
+                       }
+                       
+                       public override void Normalize(float queryNorm)
+                       {
+                               this.queryNorm = queryNorm;
+                               queryWeight *= queryNorm; // normalize query weight
+                               value_Renamed = queryWeight * idf; // idf for document
+                       }
+                       
+                       public override Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer)
+                       {
+                               TermDocs termDocs = reader.TermDocs(Enclosing_Instance.term);
+                               
+                               if (termDocs == null)
+                                       return null;
+                               
+                               return new TermScorer(this, termDocs, similarity, reader.Norms(Enclosing_Instance.term.Field()));
+                       }
+                       
+                       public override Explanation Explain(IndexReader reader, int doc)
+                       {
+                               
+                               ComplexExplanation result = new ComplexExplanation();
+                               result.SetDescription("weight(" + GetQuery() + " in " + doc + "), product of:");
+                               
+                               Explanation expl = new Explanation(idf, idfExp.Explain());
+                               
+                               // explain query weight
+                               Explanation queryExpl = new Explanation();
+                               queryExpl.SetDescription("queryWeight(" + GetQuery() + "), product of:");
+                               
+                               Explanation boostExpl = new Explanation(Enclosing_Instance.GetBoost(), "boost");
+                               if (Enclosing_Instance.GetBoost() != 1.0f)
+                                       queryExpl.AddDetail(boostExpl);
+                               queryExpl.AddDetail(expl);
+                               
+                               Explanation queryNormExpl = new Explanation(queryNorm, "queryNorm");
+                               queryExpl.AddDetail(queryNormExpl);
+                               
+                               queryExpl.SetValue(boostExpl.GetValue() * expl.GetValue() * queryNormExpl.GetValue());
+                               
+                               result.AddDetail(queryExpl);
+                               
+                               // explain field weight
+                               System.String field = Enclosing_Instance.term.Field();
+                               ComplexExplanation fieldExpl = new ComplexExplanation();
+                               fieldExpl.SetDescription("fieldWeight(" + Enclosing_Instance.term + " in " + doc + "), product of:");
+                               
+                               Explanation tfExpl = Scorer(reader, true, false).Explain(doc);
+                               fieldExpl.AddDetail(tfExpl);
+                               fieldExpl.AddDetail(expl);
+                               
+                               Explanation fieldNormExpl = new Explanation();
+                               byte[] fieldNorms = reader.Norms(field);
+                               float fieldNorm = fieldNorms != null ? Similarity.DecodeNorm(fieldNorms[doc]) : 1.0f;
+                               fieldNormExpl.SetValue(fieldNorm);
+                               fieldNormExpl.SetDescription("fieldNorm(field=" + field + ", doc=" + doc + ")");
+                               fieldExpl.AddDetail(fieldNormExpl);
+                               
+                               fieldExpl.SetMatch(tfExpl.IsMatch());
+                               fieldExpl.SetValue(tfExpl.GetValue() * expl.GetValue() * fieldNormExpl.GetValue());
+                               
+                               result.AddDetail(fieldExpl);
+                               System.Boolean? tempAux = fieldExpl.GetMatch();
+                               result.SetMatch(tempAux);
+                               
+                               // combine them
+                               result.SetValue(queryExpl.GetValue() * fieldExpl.GetValue());
+                               
+                               if (queryExpl.GetValue() == 1.0f)
+                                       return fieldExpl;
+                               
+                               return result;
+                       }
+               }
+               
+               /// <summary>Constructs a query for the term <code>t</code>. </summary>
+               public TermQuery(Term t)
+               {
+                       term = t;
+               }
+               
+               /// <summary>Returns the term of this query. </summary>
+               public virtual Term GetTerm()
+               {
+                       return term;
+               }
+               
+               public override Weight CreateWeight(Searcher searcher)
+               {
+                       return new TermWeight(this, searcher);
+               }
+               
+               public override void ExtractTerms(System.Collections.Hashtable terms)
+               {
+                       SupportClass.CollectionsHelper.AddIfNotContains(terms, GetTerm());
+               }
+               
+               /// <summary>Prints a user-readable version of this query. </summary>
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       if (!term.Field().Equals(field))
+                       {
+                               buffer.Append(term.Field());
+                               buffer.Append(":");
+                       }
+                       buffer.Append(term.Text());
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       return buffer.ToString();
+               }
+               
+               /// <summary>Returns true iff <code>o</code> is equal to this. </summary>
+               public override bool Equals(System.Object o)
+               {
+                       if (!(o is TermQuery))
+                               return false;
+                       TermQuery other = (TermQuery) o;
+                       return (this.GetBoost() == other.GetBoost()) && this.term.Equals(other.term);
+               }
+               
+               /// <summary>Returns a hash code value for this object.</summary>
+               public override int GetHashCode()
+               {
+                       return BitConverter.ToInt32(BitConverter.GetBytes(GetBoost()), 0) ^ term.GetHashCode();
+               }
+       }
+}
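
A small sketch of the field-relative printing implemented above; the names are invented, and the boost suffix comes from ToStringUtils.Boost (omitted when the boost is 1.0):

    using Mono.Lucene.Net.Index;
    using Mono.Lucene.Net.Search;

    static class TermQueryExample
    {
        public static void Print()
        {
            TermQuery query = new TermQuery(new Term("author", "jeremie"));
            query.SetBoost(2.0f);
            System.Console.WriteLine(query.ToString("author")); // e.g. jeremie^2.0
            System.Console.WriteLine(query.ToString("title"));  // e.g. author:jeremie^2.0
        }
    }
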
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TermRangeFilter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TermRangeFilter.cs
new file mode 100644 (file)
index 0000000..31781b1
--- /dev/null
@@ -0,0 +1,135 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A Filter that restricts search results to a range of values in a given
+       /// field.
+       /// 
+       /// <p/>This filter matches the documents looking for terms that fall into the
+       /// supplied range according to {@link String#compareTo(String)}. It is not intended
+       /// for numerical ranges, use {@link NumericRangeFilter} instead.
+       /// 
+       /// <p/>If you construct a large number of range filters with different ranges but on the 
+       /// same field, {@link FieldCacheRangeFilter} may have significantly better performance. 
+       /// </summary>
+       /// <since> 2.9
+       /// </since>
+       [Serializable]
+       public class TermRangeFilter:MultiTermQueryWrapperFilter
+       {
+               
+               /// <param name="fieldName">The field this range applies to
+               /// </param>
+               /// <param name="lowerTerm">The lower bound on this range
+               /// </param>
+               /// <param name="upperTerm">The upper bound on this range
+               /// </param>
+               /// <param name="includeLower">Does this range include the lower bound?
+               /// </param>
+               /// <param name="includeUpper">Does this range include the upper bound?
+               /// </param>
+               /// <throws>  IllegalArgumentException if both terms are null or if
+               /// lowerTerm is null and includeLower is true (similar for upperTerm
+               /// and includeUpper)
+               /// </throws>
+               public TermRangeFilter(System.String fieldName, System.String lowerTerm, System.String upperTerm, bool includeLower, bool includeUpper):base(new TermRangeQuery(fieldName, lowerTerm, upperTerm, includeLower, includeUpper))
+               {
+               }
+               
+               /// <summary> <strong>WARNING:</strong> Using this constructor and supplying a non-null
+               /// value in the <code>collator</code> parameter will cause every single 
+               /// index Term in the Field referenced by lowerTerm and/or upperTerm to be
+               /// examined.  Depending on the number of index Terms in this Field, the 
+               /// operation could be very slow.
+               /// 
+               /// </summary>
+               /// <param name="fieldName">The field this range applies to
+               /// </param>
+               /// <param name="lowerTerm">The lower bound on this range
+               /// </param>
+               /// <param name="upperTerm">The upper bound on this range
+               /// </param>
+               /// <param name="includeLower">Does this range include the lower bound?
+               /// </param>
+               /// <param name="includeUpper">Does this range include the upper bound?
+               /// </param>
+               /// <param name="collator">The collator to use when determining range inclusion; set
+               /// to null to use Unicode code point ordering instead of collation.
+               /// </param>
+               /// <throws>  IllegalArgumentException if both terms are null or if
+               /// lowerTerm is null and includeLower is true (similar for upperTerm
+               /// and includeUpper)
+               /// </throws>
+               public TermRangeFilter(System.String fieldName, System.String lowerTerm, System.String upperTerm, bool includeLower, bool includeUpper, System.Globalization.CompareInfo collator):base(new TermRangeQuery(fieldName, lowerTerm, upperTerm, includeLower, includeUpper, collator))
+               {
+               }
+               
+               /// <summary> Constructs a filter for field <code>fieldName</code> matching
+               /// less than or equal to <code>upperTerm</code>.
+               /// </summary>
+               public static TermRangeFilter Less(System.String fieldName, System.String upperTerm)
+               {
+                       return new TermRangeFilter(fieldName, null, upperTerm, false, true);
+               }
+               
+               /// <summary> Constructs a filter for field <code>fieldName</code> matching
+               /// greater than or equal to <code>lowerTerm</code>.
+               /// </summary>
+               public static TermRangeFilter More(System.String fieldName, System.String lowerTerm)
+               {
+                       return new TermRangeFilter(fieldName, lowerTerm, null, true, false);
+               }
+               
+               /// <summary>Returns the field name for this filter </summary>
+               public virtual System.String GetField()
+               {
+                       return ((TermRangeQuery) query).GetField();
+               }
+               
+               /// <summary>Returns the lower value of this range filter </summary>
+               public virtual System.String GetLowerTerm()
+               {
+                       return ((TermRangeQuery) query).GetLowerTerm();
+               }
+               
+               /// <summary>Returns the upper value of this range filter </summary>
+               public virtual System.String GetUpperTerm()
+               {
+                       return ((TermRangeQuery) query).GetUpperTerm();
+               }
+               
+               /// <summary>Returns <code>true</code> if the lower endpoint is inclusive </summary>
+               public virtual bool IncludesLower()
+               {
+                       return ((TermRangeQuery) query).IncludesLower();
+               }
+               
+               /// <summary>Returns <code>true</code> if the upper endpoint is inclusive </summary>
+               public virtual bool IncludesUpper()
+               {
+                       return ((TermRangeQuery) query).IncludesUpper();
+               }
+               
+               /// <summary>Returns the collator used to determine range inclusion, if any. </summary>
+               public virtual System.Globalization.CompareInfo GetCollator()
+               {
+                       return ((TermRangeQuery) query).GetCollator();
+               }
+       }
+}
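
A construction sketch for the filters above; the field and bounds are invented:

    using Mono.Lucene.Net.Search;

    static class RangeFilterExample
    {
        public static TermRangeFilter[] Build()
        {
            return new TermRangeFilter[] {
                // authors in ["a", "m"): lower bound inclusive, upper exclusive
                new TermRangeFilter("author", "a", "m", true, false),
                TermRangeFilter.Less("author", "m"), // everything <= "m"
                TermRangeFilter.More("author", "a")  // everything >= "a"
            };
        }
    }
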
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TermRangeQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TermRangeQuery.cs
new file mode 100644 (file)
index 0000000..a021339
--- /dev/null
@@ -0,0 +1,237 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A Query that matches documents within an exclusive range of terms.
+       /// 
+       /// <p/>This query matches the documents looking for terms that fall into the
+       /// supplied range according to {@link String#compareTo(String)}. It is not intended
+       /// for numerical ranges, use {@link NumericRangeQuery} instead.
+       /// 
+       /// <p/>This query uses the {@link
+       /// MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}
+       /// rewrite method.
+       /// </summary>
+       /// <since> 2.9
+       /// </since>
+       
+       [Serializable]
+       public class TermRangeQuery:MultiTermQuery
+       {
+               private System.String lowerTerm;
+               private System.String upperTerm;
+               private System.Globalization.CompareInfo collator;
+               private System.String field;
+               private bool includeLower;
+               private bool includeUpper;
+               
+               
+               /// <summary> Constructs a query selecting all terms greater/equal than <code>lowerTerm</code>
+               /// but less/equal than <code>upperTerm</code>. 
+               /// 
+               /// <p/>
+               /// If an endpoint is null, it is said 
+               /// to be "open". Either or both endpoints may be open.  Open endpoints may not 
+               /// be exclusive (you can't select all but the first or last term without 
+               /// explicitly specifying the term to exclude.)
+               /// 
+               /// </summary>
+               /// <param name="field">The field that holds both lower and upper terms.
+               /// </param>
+               /// <param name="lowerTerm">The term text at the lower end of the range
+               /// </param>
+               /// <param name="upperTerm">The term text at the upper end of the range
+               /// </param>
+               /// <param name="includeLower">If true, the <code>lowerTerm</code> is
+               /// included in the range.
+               /// </param>
+               /// <param name="includeUpper">If true, the <code>upperTerm</code> is
+               /// included in the range.
+               /// </param>
+               public TermRangeQuery(System.String field, System.String lowerTerm, System.String upperTerm, bool includeLower, bool includeUpper):this(field, lowerTerm, upperTerm, includeLower, includeUpper, null)
+               {
+               }
+               
+               /// <summary>Constructs a query selecting all terms greater/equal than
+               /// <code>lowerTerm</code> but less/equal than <code>upperTerm</code>.
+               /// <p/>
+               /// If an endpoint is null, it is said 
+               /// to be "open". Either or both endpoints may be open.  Open endpoints may not 
+               /// be exclusive (you can't select all but the first or last term without 
+               /// explicitly specifying the term to exclude.)
+               /// <p/>
+               /// If <code>collator</code> is not null, it will be used to decide whether
+               /// index terms are within the given range, rather than using the Unicode code
+               /// point order in which index terms are stored.
+               /// <p/>
+               /// <strong>WARNING:</strong> Using this constructor and supplying a non-null
+               /// value in the <code>collator</code> parameter will cause every single 
+               /// index Term in the Field referenced by lowerTerm and/or upperTerm to be
+               /// examined.  Depending on the number of index Terms in this Field, the 
+               /// operation could be very slow.
+               /// 
+               /// </summary>
+               /// <param name="field">The field that holds both lower and upper terms.
+               /// </param>
+               /// <param name="lowerTerm">The Term text at the lower end of the range
+               /// </param>
+               /// <param name="upperTerm">The Term text at the upper end of the range
+               /// </param>
+               /// <param name="includeLower">If true, the <code>lowerTerm</code> is
+               /// included in the range.
+               /// </param>
+               /// <param name="includeUpper">If true, the <code>upperTerm</code> is
+               /// included in the range.
+               /// </param>
+               /// <param name="collator">The collator to use to collate index Terms, to determine
+               /// their membership in the range bounded by <code>lowerTerm</code> and
+               /// <code>upperTerm</code>.
+               /// </param>
+               public TermRangeQuery(System.String field, System.String lowerTerm, System.String upperTerm, bool includeLower, bool includeUpper, System.Globalization.CompareInfo collator)
+               {
+                       this.field = field;
+                       this.lowerTerm = lowerTerm;
+                       this.upperTerm = upperTerm;
+                       this.includeLower = includeLower;
+                       this.includeUpper = includeUpper;
+                       this.collator = collator;
+               }
+               
+               /// <summary>Returns the field name for this query </summary>
+               public virtual System.String GetField()
+               {
+                       return field;
+               }
+               
+               /// <summary>Returns the lower value of this range query </summary>
+               public virtual System.String GetLowerTerm()
+               {
+                       return lowerTerm;
+               }
+               
+               /// <summary>Returns the upper value of this range query </summary>
+               public virtual System.String GetUpperTerm()
+               {
+                       return upperTerm;
+               }
+               
+               /// <summary>Returns <code>true</code> if the lower endpoint is inclusive </summary>
+               public virtual bool IncludesLower()
+               {
+                       return includeLower;
+               }
+               
+               /// <summary>Returns <code>true</code> if the upper endpoint is inclusive </summary>
+               public virtual bool IncludesUpper()
+               {
+                       return includeUpper;
+               }
+               
+               /// <summary>Returns the collator used to determine range inclusion, if any. </summary>
+               public virtual System.Globalization.CompareInfo GetCollator()
+               {
+                       return collator;
+               }
+               
+               public /*protected internal*/ override FilteredTermEnum GetEnum(IndexReader reader)
+               {
+                       return new TermRangeTermEnum(reader, field, lowerTerm, upperTerm, includeLower, includeUpper, collator);
+               }
+               
+               /// <summary>Prints a user-readable version of this query. </summary>
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       if (!GetField().Equals(field))
+                       {
+                               buffer.Append(GetField());
+                               buffer.Append(":");
+                       }
+                       buffer.Append(includeLower ? '[' : '{');
+                       buffer.Append(lowerTerm != null ? lowerTerm : "*");
+                       buffer.Append(" TO ");
+                       buffer.Append(upperTerm != null ? upperTerm : "*");
+                       buffer.Append(includeUpper ? ']' : '}');
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       return buffer.ToString();
+               }
+               
+               //@Override
+               public override int GetHashCode()
+               {
+                       int prime = 31;
+                       int result = base.GetHashCode();
+                       result = prime * result + ((collator == null) ? 0 : collator.GetHashCode());
+                       result = prime * result + ((field == null) ? 0 : field.GetHashCode());
+                       result = prime * result + (includeLower ? 1231 : 1237);
+                       result = prime * result + (includeUpper ? 1231 : 1237);
+                       result = prime * result + ((lowerTerm == null) ? 0 : lowerTerm.GetHashCode());
+                       result = prime * result + ((upperTerm == null) ? 0 : upperTerm.GetHashCode());
+                       return result;
+               }
+               
+               //@Override
+               public override bool Equals(System.Object obj)
+               {
+                       if (this == obj)
+                               return true;
+                       if (!base.Equals(obj))
+                               return false;
+                       if (GetType() != obj.GetType())
+                               return false;
+                       TermRangeQuery other = (TermRangeQuery) obj;
+                       if (collator == null)
+                       {
+                               if (other.collator != null)
+                                       return false;
+                       }
+                       else if (!collator.Equals(other.collator))
+                               return false;
+                       if (field == null)
+                       {
+                               if (other.field != null)
+                                       return false;
+                       }
+                       else if (!field.Equals(other.field))
+                               return false;
+                       if (includeLower != other.includeLower)
+                               return false;
+                       if (includeUpper != other.includeUpper)
+                               return false;
+                       if (lowerTerm == null)
+                       {
+                               if (other.lowerTerm != null)
+                                       return false;
+                       }
+                       else if (!lowerTerm.Equals(other.lowerTerm))
+                               return false;
+                       if (upperTerm == null)
+                       {
+                               if (other.upperTerm != null)
+                                       return false;
+                       }
+                       else if (!upperTerm.Equals(other.upperTerm))
+                               return false;
+                       return true;
+               }
+       }
+}
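
A minimal usage sketch for TermRangeQuery, not part of the patch: it assumes an open
IndexSearcher named "searcher" and the Search(Query, int) overload that this port
mirrors from the Java 2.9 API; the "title" field and the Swedish collator are
arbitrary examples.

    // all terms in "title" from "m" (inclusive) up to an open upper endpoint
    Query q = new TermRangeQuery("title", "m", null, true, true);
    TopDocs top = searcher.Search(q, 10);

    // collated variant -- per the WARNING above, every term in the field
    // is examined, which can be very slow on a large index
    System.Globalization.CompareInfo collator =
        System.Globalization.CultureInfo.GetCultureInfo("sv-SE").CompareInfo;
    Query collated = new TermRangeQuery("title", "m", "t", true, false, collator);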
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TermRangeTermEnum.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TermRangeTermEnum.cs
new file mode 100644 (file)
index 0000000..b8fe209
--- /dev/null
@@ -0,0 +1,164 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using StringHelper = Mono.Lucene.Net.Util.StringHelper;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Subclass of FilteredTermEnum for enumerating all terms that match the
+       /// specified range parameters.
+       /// <p/>
+       /// Term enumerations are always ordered by Term.compareTo().  Each term in
+       /// the enumeration is greater than all that precede it.
+       /// </summary>
+       /// <since> 2.9
+       /// </since>
+       public class TermRangeTermEnum:FilteredTermEnum
+       {
+               
+               private System.Globalization.CompareInfo collator = null;
+               private bool endEnum = false;
+               private System.String field;
+               private System.String upperTermText;
+               private System.String lowerTermText;
+               private bool includeLower;
+               private bool includeUpper;
+               
+               /// <summary> Enumerates all terms greater than or equal to <code>lowerTerm</code>
+               /// but less than or equal to <code>upperTerm</code>.
+               /// 
+               /// If an endpoint is null, it is said to be "open". Either or both
+               /// endpoints may be open. Open endpoints may not be exclusive
+               /// (you can't select all but the first or last term without
+               /// explicitly specifying the term to exclude).
+               /// 
+               /// </summary>
+               /// <param name="reader">The IndexReader providing the terms to enumerate.
+               /// </param>
+               /// <param name="field">An interned field that holds both lower and upper terms.
+               /// </param>
+               /// <param name="lowerTermText">The term text at the lower end of the range
+               /// </param>
+               /// <param name="upperTermText">The term text at the upper end of the range
+               /// </param>
+               /// <param name="includeLower">If true, the <code>lowerTerm</code> is included in the range.
+               /// </param>
+               /// <param name="includeUpper">If true, the <code>upperTerm</code> is included in the range.
+               /// </param>
+               /// <param name="collator">The collator to use to collate index Terms, to determine their
+               /// membership in the range bounded by <code>lowerTerm</code> and
+               /// <code>upperTerm</code>.
+               /// 
+               /// </param>
+               /// <throws>  IOException </throws>
+               public TermRangeTermEnum(IndexReader reader, System.String field, System.String lowerTermText, System.String upperTermText, bool includeLower, bool includeUpper, System.Globalization.CompareInfo collator)
+               {
+                       this.collator = collator;
+                       this.upperTermText = upperTermText;
+                       this.lowerTermText = lowerTermText;
+                       this.includeLower = includeLower;
+                       this.includeUpper = includeUpper;
+                       this.field = StringHelper.Intern(field);
+                       
+                       // do a little bit of normalization...
+                       // open ended range queries should always be inclusive.
+                       if (this.lowerTermText == null)
+                       {
+                               this.lowerTermText = "";
+                               this.includeLower = true;
+                       }
+                       
+                       if (this.upperTermText == null)
+                       {
+                               this.includeUpper = true;
+                       }
+                       
+                       // with a collator, every term must be examined, so start from the first term
+                       System.String startTermText = collator == null ? this.lowerTermText : "";
+                       SetEnum(reader.Terms(new Term(this.field, startTermText)));
+               }
+               
+               public override float Difference()
+               {
+                       return 1.0f;
+               }
+               
+               public override bool EndEnum()
+               {
+                       return endEnum;
+               }
+               
+               public /*protected internal*/ override bool TermCompare(Term term)
+               {
+                       if (collator == null)
+                       {
+                               // Use Unicode code point ordering
+                               // an exclusive lower bound requires an explicit comparison below
+                               bool checkLower = !includeLower;
+                               if (term != null && (System.Object) term.Field() == (System.Object) field)
+                               {
+                                       // interned comparison
+                                       if (!checkLower || null == lowerTermText || String.CompareOrdinal(term.Text(), lowerTermText) > 0)
+                                       {
+                                               checkLower = false;
+                                               if (upperTermText != null)
+                                               {
+                                                       int compare = String.CompareOrdinal(upperTermText, term.Text());
+                                                       /*
+                                                       * if beyond the upper term, or is exclusive and this is equal to
+                                                       * the upper term, break out
+                                                       */
+                                                       if ((compare < 0) || (!includeUpper && compare == 0))
+                                                       {
+                                                               endEnum = true;
+                                                               return false;
+                                                       }
+                                               }
+                                               return true;
+                                       }
+                               }
+                               else
+                               {
+                                       // moved past the enumerated field's terms; stop the enumeration
+                                       endEnum = true;
+                                       return false;
+                               }
+                               return false;
+                       }
+                       else
+                       {
+                               if (term != null && (System.Object) term.Field() == (System.Object) field)
+                               {
+                                       // interned comparison
+                                       // collated comparison against each non-null endpoint
+                                       bool aboveLower = lowerTermText == null ||
+                                               (includeLower
+                                                       ? collator.Compare(term.Text(), lowerTermText) >= 0
+                                                       : collator.Compare(term.Text(), lowerTermText) > 0);
+                                       bool belowUpper = upperTermText == null ||
+                                               (includeUpper
+                                                       ? collator.Compare(term.Text(), upperTermText) <= 0
+                                                       : collator.Compare(term.Text(), upperTermText) < 0);
+                                       return aboveLower && belowUpper;
+                               }
+                               endEnum = true;
+                               return false;
+                       }
+               }
+       }
+}
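
The enum above is normally obtained through TermRangeQuery.GetEnum, but it can be
driven directly. A sketch, assuming an open IndexReader named "reader"; the field
name and the bounds are arbitrary:

    FilteredTermEnum termEnum = new TermRangeTermEnum(reader, "title", "a", "c", true, false, null);
    try
    {
        Term t;
        while ((t = termEnum.Term()) != null)
        {
            System.Console.WriteLine(t.Text()); // a term within ["a", "c")
            if (!termEnum.Next())
                break;
        }
    }
    finally
    {
        termEnum.Close();
    }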
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TermScorer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TermScorer.cs
new file mode 100644 (file)
index 0000000..d34e680
--- /dev/null
@@ -0,0 +1,278 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TermDocs = Mono.Lucene.Net.Index.TermDocs;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Expert: A <code>Scorer</code> for documents matching a <code>Term</code>.</summary>
+       public sealed class TermScorer:Scorer
+       {
+               
+               private static readonly float[] SIM_NORM_DECODER;
+               
+               private Weight weight;
+               private TermDocs termDocs;
+               private byte[] norms;
+               private float weightValue;
+               private int doc = -1;
+               
+               private int[] docs = new int[32]; // buffered doc numbers
+               private int[] freqs = new int[32]; // buffered term freqs
+               private int pointer;
+               private int pointerMax;
+               
+               private const int SCORE_CACHE_SIZE = 32;
+               private float[] scoreCache = new float[SCORE_CACHE_SIZE];
+               
+               /// <summary> Construct a <code>TermScorer</code>.
+               /// 
+               /// </summary>
+               /// <param name="weight">The weight of the <code>Term</code> in the query.
+               /// </param>
+               /// <param name="td">An iterator over the documents matching the <code>Term</code>.
+               /// </param>
+               /// <param name="similarity">The <code>Similarity</code> implementation to be used for score
+               /// computations.
+               /// </param>
+               /// <param name="norms">The field norms of the document fields for the <code>Term</code>.
+               /// </param>
+               public /*internal*/ TermScorer(Weight weight, TermDocs td, Similarity similarity, byte[] norms):base(similarity)
+               {
+                       this.weight = weight;
+                       this.termDocs = td;
+                       this.norms = norms;
+                       this.weightValue = weight.GetValue();
+                       
+                       for (int i = 0; i < SCORE_CACHE_SIZE; i++)
+                               scoreCache[i] = GetSimilarity().Tf(i) * weightValue;
+               }
+               
+               /// <deprecated> use {@link #Score(Collector)} instead. 
+               /// </deprecated>
+        [Obsolete("use Score(Collector) instead. ")]
+               public override void  Score(HitCollector hc)
+               {
+                       Score(new HitCollectorWrapper(hc));
+               }
+               
+               public override void  Score(Collector c)
+               {
+                       Score(c, System.Int32.MaxValue, NextDoc());
+               }
+               
+               /// <deprecated> use {@link #Score(Collector, int, int)} instead. 
+               /// </deprecated>
+        [Obsolete("use Score(Collector, int, int) instead.")]
+               protected internal override bool Score(HitCollector c, int end)
+               {
+                       return Score(new HitCollectorWrapper(c), end, doc);
+               }
+               
+               // firstDocID is ignored since NextDoc() sets 'doc'
+               public /*protected internal*/ override bool Score(Collector c, int end, int firstDocID)
+               {
+                       c.SetScorer(this);
+                       while (doc < end)
+                       {
+                               // for docs in window
+                               c.Collect(doc); // collect score
+                               
+                               if (++pointer >= pointerMax)
+                               {
+                                       pointerMax = termDocs.Read(docs, freqs); // refill buffers
+                                       if (pointerMax != 0)
+                                       {
+                                               pointer = 0;
+                                       }
+                                       else
+                                       {
+                                               termDocs.Close(); // close stream
+                                               doc = System.Int32.MaxValue; // set to sentinel value
+                                               return false;
+                                       }
+                               }
+                               doc = docs[pointer];
+                       }
+                       return true;
+               }
+               
+               /// <deprecated> use {@link #DocID()} instead. 
+               /// </deprecated>
+        [Obsolete("use DocID() instead. ")]
+               public override int Doc()
+               {
+                       return doc;
+               }
+               
+               public override int DocID()
+               {
+                       return doc;
+               }
+               
+               /// <summary> Advances to the next document matching the query. <br/>
+               /// The iterator over the matching documents is buffered using
+               /// {@link TermDocs#Read(int[],int[])}.
+               /// 
+               /// </summary>
+               /// <returns> true iff there is another document matching the query.
+               /// </returns>
+               /// <deprecated> use {@link #NextDoc()} instead.
+               /// </deprecated>
+        [Obsolete("use NextDoc() instead.")]
+               public override bool Next()
+               {
+                       return NextDoc() != NO_MORE_DOCS;
+               }
+               
+               /// <summary> Advances to the next document matching the query. <br/>
+               /// The iterator over the matching documents is buffered using
+               /// {@link TermDocs#Read(int[],int[])}.
+               /// 
+               /// </summary>
+               /// <returns> the document matching the query or NO_MORE_DOCS if there are no more documents.
+               /// </returns>
+               public override int NextDoc()
+               {
+                       pointer++;
+                       if (pointer >= pointerMax)
+                       {
+                               pointerMax = termDocs.Read(docs, freqs); // refill buffer
+                               if (pointerMax != 0)
+                               {
+                                       pointer = 0;
+                               }
+                               else
+                               {
+                                       termDocs.Close(); // close stream
+                                       return doc = NO_MORE_DOCS;
+                               }
+                       }
+                       doc = docs[pointer];
+                       return doc;
+               }
+               
+               public override float Score()
+               {
+                       System.Diagnostics.Debug.Assert(doc != -1);
+                       int f = freqs[pointer];
+                       float raw = f < SCORE_CACHE_SIZE ? scoreCache[f] : GetSimilarity().Tf(f) * weightValue; // cache miss
+                       
+                       return norms == null ? raw : raw * SIM_NORM_DECODER[norms[doc] & 0xFF]; // normalize for field
+               }
+               
+               /// <summary> Skips to the first match beyond the current whose document number is
+               /// greater than or equal to a given target. <br/>
+               /// The implementation uses {@link TermDocs#SkipTo(int)}.
+               /// 
+               /// </summary>
+               /// <param name="target">The target document number.
+               /// </param>
+               /// <returns> true iff there is such a match.
+               /// </returns>
+               /// <deprecated> use {@link #Advance(int)} instead.
+               /// </deprecated>
+        [Obsolete("use Advance(int) instead.")]
+               public override bool SkipTo(int target)
+               {
+                       return Advance(target) != NO_MORE_DOCS;
+               }
+               
+               /// <summary> Advances to the first match beyond the current whose document number is
+               /// greater than or equal to a given target. <br/>
+               /// The implementation uses {@link TermDocs#SkipTo(int)}.
+               /// 
+               /// </summary>
+               /// <param name="target">The target document number.
+               /// </param>
+               /// <returns> the matching document or NO_MORE_DOCS if none exists.
+               /// </returns>
+               public override int Advance(int target)
+               {
+                       // first scan in cache
+                       for (pointer++; pointer < pointerMax; pointer++)
+                       {
+                               if (docs[pointer] >= target)
+                               {
+                                       return doc = docs[pointer];
+                               }
+                       }
+                       
+                       // not found in cache, seek underlying stream
+                       bool result = termDocs.SkipTo(target);
+                       if (result)
+                       {
+                               pointerMax = 1;
+                               pointer = 0;
+                               docs[pointer] = doc = termDocs.Doc();
+                               freqs[pointer] = termDocs.Freq();
+                       }
+                       else
+                       {
+                               doc = NO_MORE_DOCS;
+                       }
+                       return doc;
+               }
+               
+               /// <summary>Returns an explanation of the score for a document.
+               /// <br/>When this method is used, the {@link #Next()} method
+               /// and the {@link #Score(HitCollector)} method should not be used.
+               /// </summary>
+               /// <param name="doc">The document number for the explanation.
+               /// </param>
+               public override Explanation Explain(int doc)
+               {
+                       TermQuery query = (TermQuery) weight.GetQuery();
+                       Explanation tfExplanation = new Explanation();
+                       int tf = 0;
+                       while (pointer < pointerMax)
+                       {
+                               if (docs[pointer] == doc)
+                                       tf = freqs[pointer];
+                               pointer++;
+                       }
+                       if (tf == 0)
+                       {
+                               if (termDocs.SkipTo(doc))
+                               {
+                                       if (termDocs.Doc() == doc)
+                                       {
+                                               tf = termDocs.Freq();
+                                       }
+                               }
+                       }
+                       termDocs.Close();
+                       tfExplanation.SetValue(GetSimilarity().Tf(tf));
+                       tfExplanation.SetDescription("tf(termFreq(" + query.GetTerm() + ")=" + tf + ")");
+                       
+                       return tfExplanation;
+               }
+               
+               /// <summary>Returns a string representation of this <code>TermScorer</code>. </summary>
+               public override System.String ToString()
+               {
+                       return "scorer(" + weight + ")";
+               }
+               static TermScorer()
+               {
+                       SIM_NORM_DECODER = Similarity.GetNormDecoder();
+               }
+       }
+}
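
The scoreCache above trades a little memory for speed: Tf(f) * weightValue is
precomputed for the 32 lowest term frequencies, which cover most documents. A
standalone sketch of the same pattern, with sqrt(freq) standing in for
Similarity.Tf (the DefaultSimilarity curve) and the weight supplied by the caller:

    class ScoreCacheSketch
    {
        const int ScoreCacheSize = 32;
        readonly float[] scoreCache = new float[ScoreCacheSize];
        readonly float weightValue;

        // stand-in for Similarity.Tf; DefaultSimilarity uses sqrt(freq)
        static float Tf(int freq) { return (float) System.Math.Sqrt(freq); }

        public ScoreCacheSketch(float weightValue)
        {
            this.weightValue = weightValue;
            for (int f = 0; f < ScoreCacheSize; f++)
                scoreCache[f] = Tf(f) * weightValue;
        }

        public float RawScore(int freq)
        {
            // cache hit for the common small frequencies, recompute on a miss
            return freq < ScoreCacheSize ? scoreCache[freq] : Tf(freq) * weightValue;
        }
    }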
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TimeLimitedCollector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TimeLimitedCollector.cs
new file mode 100644 (file)
index 0000000..ad8b48f
--- /dev/null
@@ -0,0 +1,233 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> <p/>
+       /// The TimeLimitedCollector is used to time out search requests that take
+       /// longer than the maximum allowed search time. After this time is exceeded,
+       /// the search thread is stopped by throwing a {@link TimeExceededException}.
+       /// <p/>
+       /// 
+       /// </summary>
+       /// <deprecated> Use {@link TimeLimitingCollector} instead, which extends the new
+       /// {@link Collector}. This class will be removed in 3.0.
+       /// </deprecated>
+    [Obsolete("Use TimeLimitingCollector instead, which extends the new Collector. This class will be removed in 3.0.")]
+       public class TimeLimitedCollector:HitCollector
+       {
+               private void  InitBlock()
+               {
+                       greedy = DEFAULT_GREEDY;
+               }
+               
+               /// <summary> Default timer resolution.</summary>
+               /// <seealso cref="SetResolution(uint)">
+               /// </seealso>
+               public const uint DEFAULT_RESOLUTION = 20;
+               
+               /// <summary> Default for {@link #IsGreedy()}.</summary>
+               /// <seealso cref="IsGreedy()">
+               /// </seealso>
+               public const bool DEFAULT_GREEDY = false;
+               
+               private static uint resolution = DEFAULT_RESOLUTION;
+               
+               private bool greedy;
+               
+               private class TimerThread:SupportClass.ThreadClass
+               {
+                       
+                       // NOTE: we can avoid explicit synchronization here for several reasons:
+                       // * updates to volatile uint variables are atomic
+                       // * only a single thread modifies this value
+                       // * use of the volatile keyword ensures that it does not reside in
+                       //   a register, but in main memory (so that changes are visible to
+                       //   other threads).
+                       // * visibility of changes does not need to be instantaneous; we can
+                       //   afford losing a tick or two.
+                       //
+                       // (Reasoning inherited from the Java original; see section 17 of the
+                       // Java Language Specification for details.)
+                       private volatile uint time = 0;
+                       
+                       /// <summary> TimerThread provides a pseudo-clock service to all searching
+                       /// threads, so that they can count elapsed time with less overhead
+                       /// than repeatedly querying the system clock.  A single
+                       /// thread should be created to be used for all searches.
+                       /// </summary>
+                       internal TimerThread():base("TimeLimitedCollector timer thread")
+                       {
+                               this.IsBackground = true;
+                       }
+                       
+                       override public void  Run()
+                       {
+                               while (true)
+                               {
+                                       // TODO: Use System.nanoTime() when Lucene moves to Java SE 5.
+                                       time += Mono.Lucene.Net.Search.TimeLimitedCollector.resolution;
+                                       try
+                                       {
+                                               System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * Mono.Lucene.Net.Search.TimeLimitedCollector.resolution));
+                                       }
+                                       catch (System.Threading.ThreadInterruptedException ie)
+                                       {
+                                               SupportClass.ThreadClass.Current().Interrupt();
+                                               throw new System.SystemException(ie.Message, ie);
+                                       }
+                               }
+                       }
+                       
+                       /// <summary> Get the timer value in milliseconds.</summary>
+                       public virtual long GetMilliseconds()
+                       {
+                               return time;
+                       }
+               }
+               
+               /// <summary> Thrown when elapsed search time exceeds allowed search time. </summary>
+               [Serializable]
+               public class TimeExceededException:System.SystemException
+               {
+                       private long timeAllowed;
+                       private long timeElapsed;
+                       private int lastDocCollected;
+                       internal TimeExceededException(long timeAllowed, long timeElapsed, int lastDocCollected):base("Elapsed time: " + timeElapsed + " ms. Exceeded allowed search time: " + timeAllowed + " ms.")
+                       {
+                               this.timeAllowed = timeAllowed;
+                               this.timeElapsed = timeElapsed;
+                               this.lastDocCollected = lastDocCollected;
+                       }
+                       /// <summary> Returns allowed time (milliseconds).</summary>
+                       public virtual long GetTimeAllowed()
+                       {
+                               return timeAllowed;
+                       }
+                       /// <summary> Returns elapsed time (milliseconds).</summary>
+                       public virtual long GetTimeElapsed()
+                       {
+                               return timeElapsed;
+                       }
+                       /// <summary> Returns the last doc that was collected when the search time was exceeded. </summary>
+                       public virtual int GetLastDocCollected()
+                       {
+                               return lastDocCollected;
+                       }
+               }
+               
+               // Declare and initialize a single static timer thread to be used by
+               // all TimeLimitedCollector instances.  The runtime ensures that
+               // this initialization only happens once.
+               private static readonly TimerThread TIMER_THREAD = new TimerThread();
+               
+               private long t0;
+               private long timeout;
+               private HitCollector hc;
+               
+               /// <summary> Create a TimeLimitedCollector wrapper over another HitCollector with a specified timeout.</summary>
+               /// <param name="hc">the wrapped HitCollector
+               /// </param>
+               /// <param name="timeAllowed">max time allowed for collecting hits after which {@link TimeExceededException} is thrown
+               /// </param>
+               public TimeLimitedCollector(HitCollector hc, long timeAllowed)
+               {
+                       InitBlock();
+                       this.hc = hc;
+                       t0 = TIMER_THREAD.GetMilliseconds();
+                       this.timeout = t0 + timeAllowed;
+               }
+               
+               /// <summary> Calls Collect() on the decorated HitCollector.
+               /// 
+               /// </summary>
+               /// <throws>  TimeExceededException if the time allowed has been exceeded. </throws>
+               public override void  Collect(int doc, float score)
+               {
+                       long time = TIMER_THREAD.GetMilliseconds();
+                       if (timeout < time)
+                       {
+                               if (greedy)
+                               {
+                                       hc.Collect(doc, score);
+                               }
+                               throw new TimeExceededException(timeout - t0, time - t0, doc);
+                       }
+                       hc.Collect(doc, score);
+               }
+               
+               /// <summary> Return the timer resolution.</summary>
+               /// <seealso cref="SetResolution(uint)">
+               /// </seealso>
+               public static long GetResolution()
+               {
+                       return resolution;
+               }
+               
+               /// <summary> Set the timer resolution.
+               /// The default timer resolution is 20 milliseconds. 
+               /// This means that a search required to take no longer than 
+               /// 800 milliseconds may be stopped after 780 to 820 milliseconds.
+               /// <br/>Note that: 
+               /// <ul>
+               /// <li>Finer (smaller) resolution is more accurate but less efficient.</li>
+               /// <li>Setting the resolution to less than 5 milliseconds will be silently raised to 5 milliseconds.</li>
+               /// <li>Setting a resolution smaller than the current one might take effect only after the
+               /// current resolution interval has elapsed. (If the current resolution of 20 milliseconds is
+               /// changed to 5 milliseconds, it can take up to 20 milliseconds for the change to take effect.)</li>
+               /// </ul>      
+               /// </summary>
+               public static void  SetResolution(uint newResolution)
+               {
+                       resolution = System.Math.Max(newResolution, 5); // 5 milliseconds is about the minimum reasonable time for a timed sleep.
+               }
+               
+               /// <summary> Checks if this time-limited collector is greedy in collecting the last hit.
+               /// A non-greedy collector, upon a timeout, throws a {@link TimeExceededException}
+               /// without allowing the wrapped collector to collect the current doc. A greedy one
+               /// first allows the wrapped hit collector to collect the current doc and only then
+               /// throws a {@link TimeExceededException}.
+               /// </summary>
+               /// <seealso cref="SetGreedy(bool)">
+               /// </seealso>
+               public virtual bool IsGreedy()
+               {
+                       return greedy;
+               }
+               
+               /// <summary> Sets whether this time limited collector is greedy.</summary>
+               /// <param name="greedy">true to make this time-limited collector greedy
+               /// </param>
+               /// <seealso cref="IsGreedy()">
+               /// </seealso>
+               public virtual void  SetGreedy(bool greedy)
+               {
+                       this.greedy = greedy;
+               }
+               static TimeLimitedCollector()
+               {
+                       TIMER_THREAD.Start();
+               }
+       }
+}
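
A sketch of the (deprecated) HitCollector-based usage, assuming an IndexSearcher
"searcher", a Query "query", and the Search(Query, HitCollector) overload of the
2.9 API this port tracks; the 1000 ms budget is arbitrary, and TopDocCollector is
the HitCollector added later in this commit.

    HitCollector inner = new TopDocCollector(10);
    TimeLimitedCollector limited = new TimeLimitedCollector(inner, 1000);
    limited.SetGreedy(true); // also collect the doc that was in flight at timeout
    try
    {
        searcher.Search(query, limited);
    }
    catch (TimeLimitedCollector.TimeExceededException e)
    {
        System.Console.WriteLine("Stopped after " + e.GetTimeElapsed() +
            " ms at doc " + e.GetLastDocCollected());
    }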
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TimeLimitingCollector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TimeLimitingCollector.cs
new file mode 100644 (file)
index 0000000..79545e8
--- /dev/null
@@ -0,0 +1,250 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> The {@link TimeLimitingCollector} is used to time out search requests
+       /// that take longer than the maximum allowed search time. After this time is
+       /// exceeded, the search thread is stopped by throwing a
+       /// {@link TimeExceededException}.
+       /// </summary>
+       public class TimeLimitingCollector:Collector
+       {
+               private void  InitBlock()
+               {
+                       greedy = DEFAULT_GREEDY;
+               }
+               
+               /// <summary> Default timer resolution.</summary>
+               /// <seealso cref="SetResolution(uint)">
+               /// </seealso>
+               public const uint DEFAULT_RESOLUTION = 20;
+               
+               /// <summary> Default for {@link #IsGreedy()}.</summary>
+               /// <seealso cref="IsGreedy()">
+               /// </seealso>
+               public const bool DEFAULT_GREEDY = false;
+               
+               private static uint resolution = DEFAULT_RESOLUTION;
+               
+               private bool greedy;
+               
+               private sealed class TimerThread:SupportClass.ThreadClass
+               {
+                       
+                       // NOTE: we can avoid explicit synchronization here for several reasons:
+                       // * updates to volatile uint variables are atomic
+                       // * only a single thread modifies this value
+                       // * use of the volatile keyword ensures that it does not reside in
+                       //   a register, but in main memory (so that changes are visible to
+                       //   other threads).
+                       // * visibility of changes does not need to be instantaneous; we can
+                       //   afford losing a tick or two.
+                       //
+                       // (Reasoning inherited from the Java original; see section 17 of the
+                       // Java Language Specification for details.)
+                       private volatile uint time = 0;
+                       
+                       /// <summary> TimerThread provides a pseudo-clock service to all searching
+                       /// threads, so that they can count elapsed time with less overhead
+                       /// than repeatedly querying the system clock.  A single
+                       /// thread should be created to be used for all searches.
+                       /// </summary>
+                       internal TimerThread():base("TimeLimitingCollector timer thread")
+                       {
+                               this.IsBackground = true;
+                       }
+                       
+                       override public void  Run()
+                       {
+                               while (true)
+                               {
+                                       // TODO: Use System.nanoTime() when Lucene moves to Java SE 5.
+                                       time += Mono.Lucene.Net.Search.TimeLimitingCollector.resolution;
+                                       try
+                                       {
+                                               System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * Mono.Lucene.Net.Search.TimeLimitingCollector.resolution));
+                                       }
+                                       catch (System.Threading.ThreadInterruptedException ie)
+                                       {
+                                               SupportClass.ThreadClass.Current().Interrupt();
+                                               throw new System.SystemException(ie.Message, ie);
+                                       }
+                               }
+                       }
+                       
+                       /// <summary> Get the timer value in milliseconds.</summary>
+                       public long GetMilliseconds()
+                       {
+                               return time;
+                       }
+               }
+               
+               /// <summary>Thrown when elapsed search time exceeds allowed search time. </summary>
+               [Serializable]
+               public class TimeExceededException:System.SystemException
+               {
+                       private long timeAllowed;
+                       private long timeElapsed;
+                       private int lastDocCollected;
+                       internal TimeExceededException(long timeAllowed, long timeElapsed, int lastDocCollected):base("Elapsed time: " + timeElapsed + " ms. Exceeded allowed search time: " + timeAllowed + " ms.")
+                       {
+                               this.timeAllowed = timeAllowed;
+                               this.timeElapsed = timeElapsed;
+                               this.lastDocCollected = lastDocCollected;
+                       }
+                       /// <summary>Returns allowed time (milliseconds). </summary>
+                       public virtual long GetTimeAllowed()
+                       {
+                               return timeAllowed;
+                       }
+                       /// <summary>Returns elapsed time (milliseconds). </summary>
+                       public virtual long GetTimeElapsed()
+                       {
+                               return timeElapsed;
+                       }
+                       /// <summary>Returns the last doc (absolute doc id) that was collected when the search time was exceeded. </summary>
+                       public virtual int GetLastDocCollected()
+                       {
+                               return lastDocCollected;
+                       }
+               }
+               
+               // Declare and initialize a single static timer thread to be used by
+               // all TimeLimitingCollector instances.  The runtime ensures that
+               // this initialization only happens once.
+               private static readonly TimerThread TIMER_THREAD = new TimerThread();
+               
+               private long t0;
+               private long timeout;
+               private Collector collector;
+
+        private int docBase;
+               
+               /// <summary> Create a TimeLimitedCollector wrapper over another {@link Collector} with a specified timeout.</summary>
+               /// <param name="collector">the wrapped {@link Collector}
+               /// </param>
+               /// <param name="timeAllowed">max time allowed for collecting hits after which {@link TimeExceededException} is thrown
+               /// </param>
+               public TimeLimitingCollector(Collector collector, long timeAllowed)
+               {
+                       InitBlock();
+                       this.collector = collector;
+                       t0 = TIMER_THREAD.GetMilliseconds();
+                       this.timeout = t0 + timeAllowed;
+               }
+               
+               /// <summary> Return the timer resolution.</summary>
+               /// <seealso cref="SetResolution(uint)">
+               /// </seealso>
+               public static long GetResolution()
+               {
+                       return resolution;
+               }
+               
+               /// <summary> Set the timer resolution.
+               /// The default timer resolution is 20 milliseconds. 
+               /// This means that a search required to take no longer than 
+               /// 800 milliseconds may be stopped after 780 to 820 milliseconds.
+               /// <br/>Note that: 
+               /// <ul>
+               /// <li>Finer (smaller) resolution is more accurate but less efficient.</li>
+               /// <li>Setting the resolution to less than 5 milliseconds will be silently raised to 5 milliseconds.</li>
+               /// <li>Setting a resolution smaller than the current one might take effect only after the
+               /// current resolution interval has elapsed. (If the current resolution of 20 milliseconds is
+               /// changed to 5 milliseconds, it can take up to 20 milliseconds for the change to take effect.)</li>
+               /// </ul>      
+               /// </summary>
+               public static void  SetResolution(uint newResolution)
+               {
+                       resolution = System.Math.Max(newResolution, 5); // 5 milliseconds is about the minimum reasonable time for a timed sleep.
+               }
+               
+               /// <summary> Checks if this time-limited collector is greedy in collecting the last hit.
+               /// A non-greedy collector, upon a timeout, throws a {@link TimeExceededException}
+               /// without allowing the wrapped collector to collect the current doc. A greedy one
+               /// first allows the wrapped hit collector to collect the current doc and only then
+               /// throws a {@link TimeExceededException}.
+               /// </summary>
+               /// <seealso cref="SetGreedy(bool)">
+               /// </seealso>
+               public virtual bool IsGreedy()
+               {
+                       return greedy;
+               }
+               
+               /// <summary> Sets whether this time limited collector is greedy.</summary>
+               /// <param name="greedy">true to make this time-limited collector greedy
+               /// </param>
+               /// <seealso cref="IsGreedy()">
+               /// </seealso>
+               public virtual void  SetGreedy(bool greedy)
+               {
+                       this.greedy = greedy;
+               }
+               
+               /// <summary> Calls {@link Collector#Collect(int)} on the decorated {@link Collector}
+               /// unless the allowed time has passed, in which case it throws an exception.
+               /// 
+               /// </summary>
+               /// <throws>  TimeExceededException if the allowed time has been exceeded. </throws>
+               public override void  Collect(int doc)
+               {
+                       long time = TIMER_THREAD.GetMilliseconds();
+                       if (timeout < time)
+                       {
+                               if (greedy)
+                               {
+                                       collector.Collect(doc);
+                               }
+                               throw new TimeExceededException(timeout - t0, time - t0, docBase + doc);
+                       }
+                       collector.Collect(doc);
+               }
+               
+               public override void  SetNextReader(IndexReader reader, int docBase)
+               {
+                       collector.SetNextReader(reader, docBase);
+                       this.docBase = docBase;
+               }
+               
+               public override void  SetScorer(Scorer scorer)
+               {
+                       collector.SetScorer(scorer);
+               }
+               
+               public override bool AcceptsDocsOutOfOrder()
+               {
+                       return collector.AcceptsDocsOutOfOrder();
+               }
+               static TimeLimitingCollector()
+               {
+                       TIMER_THREAD.Start();
+               }
+       }
+}
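
The Collector-based equivalent, again assuming "searcher", "query", and a
Search(Query, Collector) overload; CountingCollector is a made-up minimal
Collector, shown only so the sketch is self-contained:

    class CountingCollector : Collector
    {
        public int Count;
        public override void SetScorer(Scorer scorer) { }
        public override void Collect(int doc) { Count++; }
        public override void SetNextReader(IndexReader reader, int docBase) { }
        public override bool AcceptsDocsOutOfOrder() { return true; }
    }

    CountingCollector counting = new CountingCollector();
    TimeLimitingCollector limited = new TimeLimitingCollector(counting, 2000);
    try
    {
        searcher.Search(query, limited);
    }
    catch (TimeLimitingCollector.TimeExceededException e)
    {
        // hits gathered before the timeout are still counted in "counting"
        System.Console.WriteLine("Timed out at absolute doc " + e.GetLastDocCollected());
    }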
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopDocCollector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopDocCollector.cs
new file mode 100644 (file)
index 0000000..ee0d4f8
--- /dev/null
@@ -0,0 +1,119 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using PriorityQueue = Mono.Lucene.Net.Util.PriorityQueue;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>A {@link HitCollector} implementation that collects the top-scoring
+       /// documents, returning them as a {@link TopDocs}.  This is used by {@link
+       /// IndexSearcher} to implement {@link TopDocs}-based search.
+       /// 
+       /// <p/>This may be extended by overriding the Collect method to, e.g.,
+       /// conditionally invoke <code>base.Collect()</code> in order to filter which
+       /// documents are collected.
+       /// 
+       /// </summary>
+       /// <deprecated> Please use {@link TopScoreDocCollector}
+       /// instead, which has better performance.
+       /// 
+       /// </deprecated>
+    [Obsolete("Please use TopScoreDocCollector instead, which has better performance.")]
+       public class TopDocCollector:HitCollector
+       {
+               
+               private ScoreDoc reusableSD;
+               
+               /// <summary>The total number of hits the collector encountered. </summary>
+               protected internal int totalHits;
+               
+               /// <summary>The priority queue which holds the top-scoring documents. </summary>
+               protected internal PriorityQueue hq;
+               
+               /// <summary>Construct to collect a given number of hits.</summary>
+               /// <param name="numHits">the maximum number of hits to collect
+               /// </param>
+               public TopDocCollector(int numHits):this(new HitQueue(numHits, false))
+               {
+               }
+               
+               /// <deprecated> use TopDocCollector(hq) instead. numHits is not used by this
+               /// constructor. It will be removed in a future release.
+               /// </deprecated>
+        [Obsolete("use TopDocCollector(hq) instead. numHits is not used by this constructor. It will be removed in a future release.")]
+               internal TopDocCollector(int numHits, PriorityQueue hq)
+               {
+                       this.hq = hq;
+               }
+               
+               /// <summary>Constructor to collect the top-scoring documents by using the given PQ.</summary>
+               /// <param name="hq">the PQ to use by this instance.
+               /// </param>
+               protected internal TopDocCollector(PriorityQueue hq)
+               {
+                       this.hq = hq;
+               }
+               
+               // javadoc inherited
+               public override void  Collect(int doc, float score)
+               {
+                       if (score > 0.0f)
+                       {
+                               totalHits++;
+                               if (reusableSD == null)
+                               {
+                                       reusableSD = new ScoreDoc(doc, score);
+                               }
+                               else if (score >= reusableSD.score)
+                               {
+                                       // reusableSD holds the last "rejected" entry, so, if
+                                       // this new score is not better than that, there's no
+                                       // need to try inserting it
+                                       reusableSD.doc = doc;
+                                       reusableSD.score = score;
+                               }
+                               else
+                               {
+                                       return ;
+                               }
+                               reusableSD = (ScoreDoc) hq.InsertWithOverflow(reusableSD);
+                       }
+               }
+               
+               /// <summary>The total number of documents that matched this query. </summary>
+               public virtual int GetTotalHits()
+               {
+                       return totalHits;
+               }
+               
+               /// <summary>The top-scoring hits. </summary>
+               public virtual TopDocs TopDocs()
+               {
+                       ScoreDoc[] scoreDocs = new ScoreDoc[hq.Size()];
+                       // put docs in array
+                       for (int i = hq.Size() - 1; i >= 0; i--)
+                               scoreDocs[i] = (ScoreDoc) hq.Pop();
+                       
+                       float maxScore = (totalHits == 0)?System.Single.NegativeInfinity:scoreDocs[0].score;
+                       
+                       return new TopDocs(totalHits, scoreDocs, maxScore);
+               }
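+               // Hedged usage sketch (editor's addition): collecting the top ten hits
+               // with this (deprecated) collector, assuming an IndexSearcher `searcher`
+               // and a Query `query` are in scope and that Searcher still exposes the
+               // HitCollector-based Search overload:
+               //
+               //     TopDocCollector collector = new TopDocCollector(10);
+               //     searcher.Search(query, collector);
+               //     TopDocs results = collector.TopDocs();
+               //     Console.WriteLine("{0} hits total", collector.GetTotalHits());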
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopDocs.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopDocs.cs
new file mode 100644 (file)
index 0000000..afd4c7b
--- /dev/null
@@ -0,0 +1,88 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Represents hits returned by {@link
+       /// Searcher#search(Query,Filter,int)} and {@link
+       /// Searcher#search(Query,int)}.
+       /// </summary>
+       [Serializable]
+       public class TopDocs
+       {
+               /// <summary>The total number of hits for the query.</summary>
+               /// <seealso cref="Hits.Length()">
+               /// </seealso>
+        [Obsolete("For backward compatibility. Use TotalHits instead")]
+               public int totalHits;
+               /// <summary>The top hits for the query. </summary>
+        [Obsolete("For backward compatibility. Use ScoreDocs instead")]
+               public ScoreDoc[] scoreDocs;
+               /// <summary>Stores the maximum score value encountered, needed for normalizing. </summary>
+        [Obsolete("For backward compatibility. Use MaxScore instead")]
+               private float maxScore;
+
+        public int TotalHits
+        {
+            get { return totalHits; }
+            set { totalHits = value; }
+        }
+
+        public ScoreDoc[] ScoreDocs
+        {
+            get { return scoreDocs; }
+            set { scoreDocs = value; }
+        }
+
+        public float MaxScore
+        {
+            get { return maxScore; }
+            set { maxScore = value; }
+        }
+
+               /// <summary>Returns the maximum score value encountered. Note that in case
+               /// scores are not tracked, this returns {@link Float#NaN}.
+               /// </summary>
+               public virtual float GetMaxScore()
+               {
+                       return MaxScore;
+               }
+               
+               /// <summary>Sets the maximum score value encountered. </summary>
+               public virtual void  SetMaxScore(float maxScore)
+               {
+                       this.MaxScore = maxScore;
+               }
+               
+               /// <summary>Constructs a TopDocs with a default maxScore=Float.NaN. </summary>
+               internal TopDocs(int totalHits, ScoreDoc[] scoreDocs):this(totalHits, scoreDocs, System.Single.NaN)
+               {
+               }
+               
+               /// <summary>Constructs a TopDocs with the given total hit count, hits and maximum score. </summary>
+               public TopDocs(int totalHits, ScoreDoc[] scoreDocs, float maxScore)
+               {
+                       this.TotalHits = totalHits;
+                       this.ScoreDocs = scoreDocs;
+                       this.MaxScore = maxScore;
+               }
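+               // Hedged illustration (editor's addition): a typical consumer walks the
+               // ScoreDocs array, assuming `searcher` and `query` are in scope and that
+               // Searcher exposes the TopDocs-returning Search(Query, int) overload:
+               //
+               //     TopDocs top = searcher.Search(query, 10);
+               //     foreach (ScoreDoc sd in top.ScoreDocs)
+               //         Console.WriteLine("doc={0} score={1}", sd.doc, sd.score);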
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopDocsCollector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopDocsCollector.cs
new file mode 100644 (file)
index 0000000..6d0c543
--- /dev/null
@@ -0,0 +1,157 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using PriorityQueue = Mono.Lucene.Net.Util.PriorityQueue;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A base class for all collectors that return a {@link TopDocs} output. This
+       /// collector allows easy extension by providing a single constructor which
+       /// accepts a {@link PriorityQueue} as well as protected members for that
+       /// priority queue and a counter of the number of total hits.<br/>
+       /// Extending classes can override {@link #TopDocs(int, int)} and
+       /// {@link #GetTotalHits()} in order to provide their own implementation.
+       /// </summary>
+       public abstract class TopDocsCollector:Collector
+       {
+               
+               // This is used in case topDocs() is called with illegal parameters, or there
+               // simply aren't (enough) results.
+               protected internal static readonly TopDocs EMPTY_TOPDOCS = new TopDocs(0, new ScoreDoc[0], System.Single.NaN);
+               
+               /// <summary> The priority queue which holds the top documents. Note that different
+               /// implementations of PriorityQueue give different meaning to 'top documents'.
+               /// HitQueue for example aggregates the top scoring documents, while other PQ
+               /// implementations may hold documents sorted by other criteria.
+               /// </summary>
+               protected internal PriorityQueue pq;
+               
+               /// <summary>The total number of documents that the collector encountered. </summary>
+               protected internal int totalHits;
+               
+               protected internal TopDocsCollector(PriorityQueue pq)
+               {
+                       this.pq = pq;
+               }
+               
+               /// <summary> Populates the results array with the ScoreDoc instances. This can be
+               /// overridden in case a different ScoreDoc type should be returned.
+               /// </summary>
+               protected internal virtual void  PopulateResults(ScoreDoc[] results, int howMany)
+               {
+                       for (int i = howMany - 1; i >= 0; i--)
+                       {
+                               results[i] = (ScoreDoc) pq.Pop();
+                       }
+               }
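+               // Hedged sketch (editor's addition): a subclass that stores a custom
+               // ScoreDoc subtype in the queue could override the method above; the
+               // MyScoreDoc type here is hypothetical.
+               //
+               //     protected internal override void PopulateResults(ScoreDoc[] results, int howMany)
+               //     {
+               //         for (int i = howMany - 1; i >= 0; i--)
+               //             results[i] = (MyScoreDoc) pq.Pop(); // hypothetical subtype
+               //     }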
+               
+               /// <summary> Returns a {@link TopDocs} instance containing the given results. If
+               /// <code>results</code> is null it means there are no results to return,
+               /// either because there were 0 calls to collect() or because the arguments to
+               /// topDocs were invalid.
+               /// </summary>
+               public /*protected internal*/ virtual TopDocs NewTopDocs(ScoreDoc[] results, int start)
+               {
+                       return results == null?EMPTY_TOPDOCS:new TopDocs(totalHits, results);
+               }
+               
+               /// <summary>The total number of documents that matched this query. </summary>
+               public virtual int GetTotalHits()
+               {
+                       return totalHits;
+               }
+               
+               /// <summary>Returns the top docs that were collected by this collector. </summary>
+               public TopDocs TopDocs()
+               {
+                       // In case pq was populated with sentinel values, there might be fewer
+                       // results than pq.size(). Therefore return results only up to the
+                       // smaller of pq.size() and totalHits.
+                       return TopDocs(0, totalHits < pq.Size()?totalHits:pq.Size());
+               }
+               
+               /// <summary> Returns the documents in the range [start .. pq.size()) that were collected
+               /// by this collector. Note that if start >= pq.size(), an empty TopDocs is
+               /// returned.<br/>
+               /// This method is convenient to call if the application always asks for the
+               /// last results, starting from the last 'page'.<br/>
+               /// <b>NOTE:</b> you cannot call this method more than once for each search
+               /// execution. If you need to call it more than once, passing each time a
+               /// different <code>start</code>, you should call {@link #TopDocs()} and work
+               /// with the returned {@link TopDocs} object, which will contain all the
+               /// results this search execution collected.
+               /// </summary>
+               public TopDocs TopDocs(int start)
+               {
+                       // In case pq was populated with sentinel values, there might be fewer
+                       // results than pq.size(). Therefore return results only up to the
+                       // smaller of pq.size() and totalHits.
+                       return TopDocs(start, totalHits < pq.Size()?totalHits:pq.Size());
+               }
+               
+               /// <summary> Returns the documents in the range [start .. start+howMany) that were
+               /// collected by this collector. Note that if start >= pq.size(), an empty
+               /// TopDocs is returned, and if pq.size() - start &lt; howMany, then only the
+               /// available documents in [start .. pq.size()) are returned.<br/>
+               /// This method is useful when the search application allows pagination of
+               /// results; it also optimizes memory use by allocating only as much space
+               /// as <code>howMany</code> requires.<br/>
+               /// <b>NOTE:</b> you cannot call this method more than once for each search
+               /// execution. If you need to call it more than once, passing each time a
+               /// different range, you should call {@link #TopDocs()} and work with the
+               /// returned {@link TopDocs} object, which will contain all the results this
+               /// search execution collected.
+               /// </summary>
+               public TopDocs TopDocs(int start, int howMany)
+               {
+                       
+                       // In case pq was populated with sentinel values, there might be fewer
+                       // results than pq.size(). Therefore return results only up to the
+                       // smaller of pq.size() and totalHits.
+                       int size = totalHits < pq.Size()?totalHits:pq.Size();
+                       
+                       // Don't bother to throw an exception, just return an empty TopDocs in case
+                       // the parameters are invalid or out of range.
+                       if (start < 0 || start >= size || howMany <= 0)
+                       {
+                               return NewTopDocs(null, start);
+                       }
+                       
+                       // We know that start < pq.size(), so just fix howMany.
+                       howMany = System.Math.Min(size - start, howMany);
+                       ScoreDoc[] results = new ScoreDoc[howMany];
+                       
+                       // pq's pop() returns the 'least' element in the queue, therefore need
+                       // to discard the first ones, until we reach the requested range.
+                       // Note that this loop will usually not be executed, since the common usage
+                       // should be that the caller asks for the last howMany results. However it's
+                       // needed here for completeness.
+                       for (int i = pq.Size() - start - howMany; i > 0; i--)
+                       {
+                               pq.Pop();
+                       }
+                       
+                       // Get the requested results from pq.
+                       PopulateResults(results, howMany);
+                       
+                       return NewTopDocs(results, start);
+               }
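+               // Hedged example (editor's addition): with a collector `tdc` already fed
+               // by a search, the second page of ten hits could be fetched as below.
+               // Per the doc comment above, a TopDocs overload may be called only once
+               // per search execution, so repeated paging should instead call TopDocs()
+               // once and slice the returned ScoreDocs array.
+               //
+               //     TopDocs secondPage = tdc.TopDocs(10, 10); // hits 10..19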
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopFieldCollector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopFieldCollector.cs
new file mode 100644 (file)
index 0000000..ecca1d3
--- /dev/null
@@ -0,0 +1,1140 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using PriorityQueue = Mono.Lucene.Net.Util.PriorityQueue;
+using Entry = Mono.Lucene.Net.Search.FieldValueHitQueue.Entry;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A {@link Collector} that sorts by {@link SortField} using
+       /// {@link FieldComparator}s.
+       /// <p/>
+       /// See the {@link #create(Mono.Lucene.Net.Search.Sort, int, boolean, boolean, boolean, boolean)} method
+       /// for instantiating a TopFieldCollector.
+       /// 
+       /// <p/><b>NOTE:</b> This API is experimental and might change in
+       /// incompatible ways in the next release.<p/>
+       /// </summary>
+       public abstract class TopFieldCollector:TopDocsCollector
+       {
+               
+               // TODO: one optimization we could do is to pre-fill
+               // the queue with sentinel values guaranteed to always
+               // compare lower than a real hit; this would save
+               // having to check queueFull on each insert
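+               // Hedged usage sketch (editor's addition): the create factory referenced
+               // in the summary above is defined later in this file; assuming it keeps
+               // the Lucene 2.9 parameter order (sort, numHits, fillFields,
+               // trackDocScores, trackMaxScore, docsScoredInOrder) and this port's
+               // casing, a sorted search could look like:
+               //
+               //     Sort sort = new Sort(new SortField("date", SortField.STRING, true));
+               //     TopFieldCollector tfc = TopFieldCollector.Create(sort, 10, true, false, false, true);
+               //     searcher.Search(query, tfc);
+               //     TopDocs docs = tfc.TopDocs();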
+               
+               /*
+               * Implements a TopFieldCollector over a single SortField criterion, without
+               * tracking document scores and maxScore.
+               */
+               private class OneComparatorNonScoringCollector:TopFieldCollector
+               {
+                       
+                       internal FieldComparator comparator;
+                       internal int reverseMul;
+                       
+                       public OneComparatorNonScoringCollector(FieldValueHitQueue queue, int numHits, bool fillFields):base(queue, numHits, fillFields)
+                       {
+                               comparator = queue.GetComparators()[0];
+                               reverseMul = queue.GetReverseMul()[0];
+                       }
+                       
+                       internal void  UpdateBottom(int doc)
+                       {
+                               // bottom.score is already set to Float.NaN in add().
+                               bottom.docID = docBase + doc;
+                               bottom = (Entry) pq.UpdateTop();
+                       }
+                       
+                       public override void  Collect(int doc)
+                       {
+                               ++totalHits;
+                               if (queueFull)
+                               {
+                                       if ((reverseMul * comparator.CompareBottom(doc)) <= 0)
+                                       {
+                                               // since docs are visited in doc Id order, if compare is 0, it means
+                                               // this document is larger than anything else in the queue, and
+                                               // therefore not competitive.
+                                               return ;
+                                       }
+                                       
+                                       // This hit is competitive - replace bottom element in queue & adjustTop
+                                       comparator.Copy(bottom.slot, doc);
+                                       UpdateBottom(doc);
+                                       comparator.SetBottom(bottom.slot);
+                               }
+                               else
+                               {
+                                       // Startup transient: queue hasn't gathered numHits yet
+                                       int slot = totalHits - 1;
+                                       // Copy hit into queue
+                                       comparator.Copy(slot, doc);
+                                       Add(slot, doc, System.Single.NaN);
+                                       if (queueFull)
+                                       {
+                                               comparator.SetBottom(bottom.slot);
+                                       }
+                               }
+                       }
+                       
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               this.docBase = docBase;
+                               comparator.SetNextReader(reader, docBase);
+                       }
+                       
+                       public override void  SetScorer(Scorer scorer)
+                       {
+                               comparator.SetScorer(scorer);
+                       }
+               }
+               
+               /*
+               * Implements a TopFieldCollector over a single SortField criterion, without
+               * tracking document scores and maxScore, and assumes docs are collected out
+               * of doc ID order.
+               */
+               private class OutOfOrderOneComparatorNonScoringCollector:OneComparatorNonScoringCollector
+               {
+                       
+                       public OutOfOrderOneComparatorNonScoringCollector(FieldValueHitQueue queue, int numHits, bool fillFields):base(queue, numHits, fillFields)
+                       {
+                       }
+                       
+                       public override void  Collect(int doc)
+                       {
+                               ++totalHits;
+                               if (queueFull)
+                               {
+                                       // Fastmatch: return if this hit is not competitive
+                                       int cmp = reverseMul * comparator.CompareBottom(doc);
+                                       if (cmp < 0 || (cmp == 0 && doc + docBase > bottom.docID))
+                                       {
+                                               return ;
+                                       }
+                                       
+                                       // This hit is competitive - replace bottom element in queue & adjustTop
+                                       comparator.Copy(bottom.slot, doc);
+                                       UpdateBottom(doc);
+                                       comparator.SetBottom(bottom.slot);
+                               }
+                               else
+                               {
+                                       // Startup transient: queue hasn't gathered numHits yet
+                                       int slot = totalHits - 1;
+                                       // Copy hit into queue
+                                       comparator.Copy(slot, doc);
+                                       Add(slot, doc, System.Single.NaN);
+                                       if (queueFull)
+                                       {
+                                               comparator.SetBottom(bottom.slot);
+                                       }
+                               }
+                       }
+                       
+                       public override bool AcceptsDocsOutOfOrder()
+                       {
+                               return true;
+                       }
+               }
+               
+               /*
+               * Implements a TopFieldCollector over a single SortField criterion, while
+               * tracking document scores but not maxScore.
+               */
+               private class OneComparatorScoringNoMaxScoreCollector:OneComparatorNonScoringCollector
+               {
+                       
+                       internal Scorer scorer;
+                       
+                       public OneComparatorScoringNoMaxScoreCollector(FieldValueHitQueue queue, int numHits, bool fillFields):base(queue, numHits, fillFields)
+                       {
+                       }
+                       
+                       internal void UpdateBottom(int doc, float score)
+                       {
+                               bottom.docID = docBase + doc;
+                               bottom.score = score;
+                               bottom = (Entry) pq.UpdateTop();
+                       }
+                       
+                       public override void  Collect(int doc)
+                       {
+                               ++totalHits;
+                               if (queueFull)
+                               {
+                                       if ((reverseMul * comparator.CompareBottom(doc)) <= 0)
+                                       {
+                                               // since docs are visited in doc Id order, if compare is 0, it means
+                                               // this document is larger than anything else in the queue, and
+                                               // therefore not competitive.
+                                               return ;
+                                       }
+                                       
+                                       // Compute the score only if the hit is competitive.
+                                       float score = scorer.Score();
+                                       
+                                       // This hit is competitive - replace bottom element in queue & adjustTop
+                                       comparator.Copy(bottom.slot, doc);
+                                       UpdateBottom(doc, score);
+                                       comparator.SetBottom(bottom.slot);
+                               }
+                               else
+                               {
+                                       // Compute the score only if the hit is competitive.
+                                       float score = scorer.Score();
+                                       
+                                       // Startup transient: queue hasn't gathered numHits yet
+                                       int slot = totalHits - 1;
+                                       // Copy hit into queue
+                                       comparator.Copy(slot, doc);
+                                       Add(slot, doc, score);
+                                       if (queueFull)
+                                       {
+                                               comparator.SetBottom(bottom.slot);
+                                       }
+                               }
+                       }
+                       
+                       public override void  SetScorer(Scorer scorer)
+                       {
+                               this.scorer = scorer;
+                               comparator.SetScorer(scorer);
+                       }
+               }
+               
+               /*
+               * Implements a TopFieldCollector over a single SortField criterion, while
+               * tracking document scores but not maxScore, and assumes docs are collected
+               * out of doc ID order.
+               */
+               private class OutOfOrderOneComparatorScoringNoMaxScoreCollector:OneComparatorScoringNoMaxScoreCollector
+               {
+                       
+                       public OutOfOrderOneComparatorScoringNoMaxScoreCollector(FieldValueHitQueue queue, int numHits, bool fillFields):base(queue, numHits, fillFields)
+                       {
+                       }
+                       
+                       public override void  Collect(int doc)
+                       {
+                               ++totalHits;
+                               if (queueFull)
+                               {
+                                       // Fastmatch: return if this hit is not competitive
+                                       int cmp = reverseMul * comparator.CompareBottom(doc);
+                                       if (cmp < 0 || (cmp == 0 && doc + docBase > bottom.docID))
+                                       {
+                                               return ;
+                                       }
+                                       
+                                       // Compute the score only if the hit is competitive.
+                                       float score = scorer.Score();
+                                       
+                                       // This hit is competitive - replace bottom element in queue & adjustTop
+                                       comparator.Copy(bottom.slot, doc);
+                                       UpdateBottom(doc, score);
+                                       comparator.SetBottom(bottom.slot);
+                               }
+                               else
+                               {
+                                       // Compute the score only if the hit is competitive.
+                                       float score = scorer.Score();
+                                       
+                                       // Startup transient: queue hasn't gathered numHits yet
+                                       int slot = totalHits - 1;
+                                       // Copy hit into queue
+                                       comparator.Copy(slot, doc);
+                                       Add(slot, doc, score);
+                                       if (queueFull)
+                                       {
+                                               comparator.SetBottom(bottom.slot);
+                                       }
+                               }
+                       }
+                       
+                       public override bool AcceptsDocsOutOfOrder()
+                       {
+                               return true;
+                       }
+               }
+               
+               /*
+               * Implements a TopFieldCollector over a single SortField criterion, while
+               * tracking document scores and maxScore.
+               */
+               private class OneComparatorScoringMaxScoreCollector:OneComparatorNonScoringCollector
+               {
+                       
+                       internal Scorer scorer;
+                       
+                       public OneComparatorScoringMaxScoreCollector(FieldValueHitQueue queue, int numHits, bool fillFields):base(queue, numHits, fillFields)
+                       {
+                               // Must set maxScore to NEG_INF; otherwise the comparison against NaN would never update it.
+                               maxScore = System.Single.NegativeInfinity;
+                       }
+                       
+                       internal void  UpdateBottom(int doc, float score)
+                       {
+                               bottom.docID = docBase + doc;
+                               bottom.score = score;
+                               bottom = (Entry) pq.UpdateTop();
+                       }
+                       
+                       public override void  Collect(int doc)
+                       {
+                               float score = scorer.Score();
+                               if (score > maxScore)
+                               {
+                                       maxScore = score;
+                               }
+                               ++totalHits;
+                               if (queueFull)
+                               {
+                                       if ((reverseMul * comparator.CompareBottom(doc)) <= 0)
+                                       {
+                                               // since docs are visited in doc Id order, if compare is 0, it means
+                                               // this document is larger than anything else in the queue, and
+                                               // therefore not competitive.
+                                               return ;
+                                       }
+                                       
+                                       // This hit is competitive - replace bottom element in queue & adjustTop
+                                       comparator.Copy(bottom.slot, doc);
+                                       UpdateBottom(doc, score);
+                                       comparator.SetBottom(bottom.slot);
+                               }
+                               else
+                               {
+                                       // Startup transient: queue hasn't gathered numHits yet
+                                       int slot = totalHits - 1;
+                                       // Copy hit into queue
+                                       comparator.Copy(slot, doc);
+                                       Add(slot, doc, score);
+                                       if (queueFull)
+                                       {
+                                               comparator.SetBottom(bottom.slot);
+                                       }
+                               }
+                       }
+                       
+                       public override void  SetScorer(Scorer scorer)
+                       {
+                               this.scorer = scorer;
+                               base.SetScorer(scorer);
+                       }
+               }
+               
+               /*
+               * Implements a TopFieldCollector over a single SortField criterion, while
+               * tracking document scores and maxScore, and assumes docs are collected out
+               * of doc ID order.
+               */
+               private class OutOfOrderOneComparatorScoringMaxScoreCollector:OneComparatorScoringMaxScoreCollector
+               {
+                       
+                       public OutOfOrderOneComparatorScoringMaxScoreCollector(FieldValueHitQueue queue, int numHits, bool fillFields):base(queue, numHits, fillFields)
+                       {
+                       }
+                       
+                       public override void  Collect(int doc)
+                       {
+                               float score = scorer.Score();
+                               if (score > maxScore)
+                               {
+                                       maxScore = score;
+                               }
+                               ++totalHits;
+                               if (queueFull)
+                               {
+                                       // Fastmatch: return if this hit is not competitive
+                                       int cmp = reverseMul * comparator.CompareBottom(doc);
+                                       if (cmp < 0 || (cmp == 0 && doc + docBase > bottom.docID))
+                                       {
+                                               return ;
+                                       }
+                                       
+                                       // This hit is competitive - replace bottom element in queue & adjustTop
+                                       comparator.Copy(bottom.slot, doc);
+                                       UpdateBottom(doc, score);
+                                       comparator.SetBottom(bottom.slot);
+                               }
+                               else
+                               {
+                                       // Startup transient: queue hasn't gathered numHits yet
+                                       int slot = totalHits - 1;
+                                       // Copy hit into queue
+                                       comparator.Copy(slot, doc);
+                                       Add(slot, doc, score);
+                                       if (queueFull)
+                                       {
+                                               comparator.SetBottom(bottom.slot);
+                                       }
+                               }
+                       }
+                       
+                       public override bool AcceptsDocsOutOfOrder()
+                       {
+                               return true;
+                       }
+               }
+               
+               /*
+               * Implements a TopFieldCollector over multiple SortField criteria, without
+               * tracking document scores and maxScore.
+               */
+               private class MultiComparatorNonScoringCollector:TopFieldCollector
+               {
+                       
+                       internal FieldComparator[] comparators;
+                       internal int[] reverseMul;
+                       
+                       public MultiComparatorNonScoringCollector(FieldValueHitQueue queue, int numHits, bool fillFields):base(queue, numHits, fillFields)
+                       {
+                               comparators = queue.GetComparators();
+                               reverseMul = queue.GetReverseMul();
+                       }
+                       
+                       internal void  UpdateBottom(int doc)
+                       {
+                               // bottom.score is already set to Float.NaN in add().
+                               bottom.docID = docBase + doc;
+                               bottom = (Entry) pq.UpdateTop();
+                       }
+                       
+                       public override void  Collect(int doc)
+                       {
+                               ++totalHits;
+                               if (queueFull)
+                               {
+                                       // Fastmatch: return if this hit is not competitive
+                                       for (int i = 0; ; i++)
+                                       {
+                                               int c = reverseMul[i] * comparators[i].CompareBottom(doc);
+                                               if (c < 0)
+                                               {
+                                                       // Definitely not competitive.
+                                                       return ;
+                                               }
+                                               else if (c > 0)
+                                               {
+                                                       // Definitely competitive.
+                                                       break;
+                                               }
+                                               else if (i == comparators.Length - 1)
+                                               {
+                                                       // Here c=0. If we're at the last comparator, this doc is not
+                                                       // competitive, since docs are visited in doc Id order, which means
+                                                       // this doc cannot compete with any other document in the queue.
+                                                       return ;
+                                               }
+                                       }
+                                       
+                                       // This hit is competitive - replace bottom element in queue & adjustTop
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].Copy(bottom.slot, doc);
+                                       }
+                                       
+                                       UpdateBottom(doc);
+                                       
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].SetBottom(bottom.slot);
+                                       }
+                               }
+                               else
+                               {
+                                       // Startup transient: queue hasn't gathered numHits yet
+                                       int slot = totalHits - 1;
+                                       // Copy hit into queue
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].Copy(slot, doc);
+                                       }
+                                       Add(slot, doc, System.Single.NaN);
+                                       if (queueFull)
+                                       {
+                                               for (int i = 0; i < comparators.Length; i++)
+                                               {
+                                                       comparators[i].SetBottom(bottom.slot);
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       public override void  SetNextReader(IndexReader reader, int docBase)
+                       {
+                               this.docBase = docBase;
+                               for (int i = 0; i < comparators.Length; i++)
+                               {
+                                       comparators[i].SetNextReader(reader, docBase);
+                               }
+                       }
+                       
+                       public override void  SetScorer(Scorer scorer)
+                       {
+                               // set the scorer on all comparators
+                               for (int i = 0; i < comparators.Length; i++)
+                               {
+                                       comparators[i].SetScorer(scorer);
+                               }
+                       }
+               }
+               
+               /*
+               * Implements a TopFieldCollector over multiple SortField criteria, without
+               * tracking document scores and maxScore, and assumes docs are collected out
+               * of doc ID order.
+               */
+               private class OutOfOrderMultiComparatorNonScoringCollector:MultiComparatorNonScoringCollector
+               {
+                       
+                       public OutOfOrderMultiComparatorNonScoringCollector(FieldValueHitQueue queue, int numHits, bool fillFields):base(queue, numHits, fillFields)
+                       {
+                       }
+                       
+                       public override void  Collect(int doc)
+                       {
+                               ++totalHits;
+                               if (queueFull)
+                               {
+                                       // Fastmatch: return if this hit is not competitive
+                                       for (int i = 0; ; i++)
+                                       {
+                                               int c = reverseMul[i] * comparators[i].CompareBottom(doc);
+                                               if (c < 0)
+                                               {
+                                                       // Definitely not competitive.
+                                                       return ;
+                                               }
+                                               else if (c > 0)
+                                               {
+                                                       // Definitely competitive.
+                                                       break;
+                                               }
+                                               else if (i == comparators.Length - 1)
+                                               {
+                                                       // This is the equals case.
+                                                       if (doc + docBase > bottom.docID)
+                                                       {
+                                                               // Definitely not competitive
+                                                               return ;
+                                                       }
+                                                       break;
+                                               }
+                                       }
+                                       
+                                       // This hit is competitive - replace bottom element in queue & adjustTop
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].Copy(bottom.slot, doc);
+                                       }
+                                       
+                                       UpdateBottom(doc);
+                                       
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].SetBottom(bottom.slot);
+                                       }
+                               }
+                               else
+                               {
+                                       // Startup transient: queue hasn't gathered numHits yet
+                                       int slot = totalHits - 1;
+                                       // Copy hit into queue
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].Copy(slot, doc);
+                                       }
+                                       Add(slot, doc, System.Single.NaN);
+                                       if (queueFull)
+                                       {
+                                               for (int i = 0; i < comparators.Length; i++)
+                                               {
+                                                       comparators[i].SetBottom(bottom.slot);
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       public override bool AcceptsDocsOutOfOrder()
+                       {
+                               return true;
+                       }
+               }
+               
+               /*
+               * Implements a TopFieldCollector over multiple SortField criteria, while
+               * tracking document scores and maxScore.
+               */
+               private class MultiComparatorScoringMaxScoreCollector:MultiComparatorNonScoringCollector
+               {
+                       
+                       internal Scorer scorer;
+                       
+                       public MultiComparatorScoringMaxScoreCollector(FieldValueHitQueue queue, int numHits, bool fillFields):base(queue, numHits, fillFields)
+                       {
+                               // Must set maxScore to NEG_INF; otherwise the comparison against NaN would never update it.
+                               maxScore = System.Single.NegativeInfinity;
+                       }
+                       
+                       internal void  UpdateBottom(int doc, float score)
+                       {
+                               bottom.docID = docBase + doc;
+                               bottom.score = score;
+                               bottom = (Entry) pq.UpdateTop();
+                       }
+                       
+                       public override void  Collect(int doc)
+                       {
+                               float score = scorer.Score();
+                               if (score > maxScore)
+                               {
+                                       maxScore = score;
+                               }
+                               ++totalHits;
+                               if (queueFull)
+                               {
+                                       // Fastmatch: return if this hit is not competitive
+                                       for (int i = 0; ; i++)
+                                       {
+                                               int c = reverseMul[i] * comparators[i].CompareBottom(doc);
+                                               if (c < 0)
+                                               {
+                                                       // Definitely not competitive.
+                                                       return ;
+                                               }
+                                               else if (c > 0)
+                                               {
+                                                       // Definitely competitive.
+                                                       break;
+                                               }
+                                               else if (i == comparators.Length - 1)
+                                               {
+                                                       // Here c=0. If we're at the last comparator, this doc is not
+                                                       // competitive, since docs are visited in doc Id order, which means
+                                                       // this doc cannot compete with any other document in the queue.
+                                                       return ;
+                                               }
+                                       }
+                                       
+                                       // This hit is competitive - replace bottom element in queue & adjustTop
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].Copy(bottom.slot, doc);
+                                       }
+                                       
+                                       UpdateBottom(doc, score);
+                                       
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].SetBottom(bottom.slot);
+                                       }
+                               }
+                               else
+                               {
+                                       // Startup transient: queue hasn't gathered numHits yet
+                                       int slot = totalHits - 1;
+                                       // Copy hit into queue
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].Copy(slot, doc);
+                                       }
+                                       Add(slot, doc, score);
+                                       if (queueFull)
+                                       {
+                                               for (int i = 0; i < comparators.Length; i++)
+                                               {
+                                                       comparators[i].SetBottom(bottom.slot);
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       public override void  SetScorer(Scorer scorer)
+                       {
+                               this.scorer = scorer;
+                               base.SetScorer(scorer);
+                       }
+               }
+               
+               /*
+               * Implements a TopFieldCollector over multiple SortField criteria, while
+               * tracking document scores and maxScore, and assumes docs are collected out
+               * of doc ID order.
+               */
+               private sealed class OutOfOrderMultiComparatorScoringMaxScoreCollector:MultiComparatorScoringMaxScoreCollector
+               {
+                       
+                       public OutOfOrderMultiComparatorScoringMaxScoreCollector(FieldValueHitQueue queue, int numHits, bool fillFields):base(queue, numHits, fillFields)
+                       {
+                       }
+                       
+                       public override void  Collect(int doc)
+                       {
+                               float score = scorer.Score();
+                               if (score > maxScore)
+                               {
+                                       maxScore = score;
+                               }
+                               ++totalHits;
+                               if (queueFull)
+                               {
+                                       // Fastmatch: return if this hit is not competitive
+                                       for (int i = 0; ; i++)
+                                       {
+                                               int c = reverseMul[i] * comparators[i].CompareBottom(doc);
+                                               if (c < 0)
+                                               {
+                                                       // Definitely not competitive.
+                                                       return ;
+                                               }
+                                               else if (c > 0)
+                                               {
+                                                       // Definitely competitive.
+                                                       break;
+                                               }
+                                               else if (i == comparators.Length - 1)
+                                               {
+                                                       // This is the equals case.
+                                                       if (doc + docBase > bottom.docID)
+                                                       {
+                                                               // Definitely not competitive
+                                                               return ;
+                                                       }
+                                                       break;
+                                               }
+                                       }
+                                       
+                                       // This hit is competitive - replace bottom element in queue & adjustTop
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].Copy(bottom.slot, doc);
+                                       }
+                                       
+                                       UpdateBottom(doc, score);
+                                       
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].SetBottom(bottom.slot);
+                                       }
+                               }
+                               else
+                               {
+                                       // Startup transient: queue hasn't gathered numHits yet
+                                       int slot = totalHits - 1;
+                                       // Copy hit into queue
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].Copy(slot, doc);
+                                       }
+                                       Add(slot, doc, score);
+                                       if (queueFull)
+                                       {
+                                               for (int i = 0; i < comparators.Length; i++)
+                                               {
+                                                       comparators[i].SetBottom(bottom.slot);
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       public override bool AcceptsDocsOutOfOrder()
+                       {
+                               return true;
+                       }
+               }
+               
+               /*
+               * Implements a TopFieldCollector over multiple SortField criteria,
+               * tracking document scores but not maxScore.
+               */
+               private class MultiComparatorScoringNoMaxScoreCollector:MultiComparatorNonScoringCollector
+               {
+                       
+                       internal Scorer scorer;
+                       
+                       public MultiComparatorScoringNoMaxScoreCollector(FieldValueHitQueue queue, int numHits, bool fillFields):base(queue, numHits, fillFields)
+                       {
+                       }
+                       
+                       internal void  UpdateBottom(int doc, float score)
+                       {
+                               bottom.docID = docBase + doc;
+                               bottom.score = score;
+                               bottom = (Entry) pq.UpdateTop();
+                       }
+                       
+                       public override void  Collect(int doc)
+                       {
+                               ++totalHits;
+                               if (queueFull)
+                               {
+                                       // Fastmatch: return if this hit is not competitive
+                                       for (int i = 0; ; i++)
+                                       {
+                                               int c = reverseMul[i] * comparators[i].CompareBottom(doc);
+                                               if (c < 0)
+                                               {
+                                                       // Definitely not competitive.
+                                                       return ;
+                                               }
+                                               else if (c > 0)
+                                               {
+                                                       // Definitely competitive.
+                                                       break;
+                                               }
+                                               else if (i == comparators.Length - 1)
+                                               {
+                                                       // Here c=0. If we're at the last comparator, this doc is not
+                                                       // competitive, since docs are visited in doc Id order, which means
+                                                       // this doc cannot compete with any other document in the queue.
+                                                       return ;
+                                               }
+                                       }
+                                       
+                                       // This hit is competitive - replace bottom element in queue & adjustTop
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].Copy(bottom.slot, doc);
+                                       }
+                                       
+                                       // Compute score only if it is competitive.
+                                       float score = scorer.Score();
+                                       UpdateBottom(doc, score);
+                                       
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].SetBottom(bottom.slot);
+                                       }
+                               }
+                               else
+                               {
+                                       // Startup transient: queue hasn't gathered numHits yet
+                                       int slot = totalHits - 1;
+                                       // Copy hit into queue
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].Copy(slot, doc);
+                                       }
+                                       
+                                       // Compute score only if it is competitive.
+                                       float score = scorer.Score();
+                                       Add(slot, doc, score);
+                                       if (queueFull)
+                                       {
+                                               for (int i = 0; i < comparators.Length; i++)
+                                               {
+                                                       comparators[i].SetBottom(bottom.slot);
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       public override void  SetScorer(Scorer scorer)
+                       {
+                               this.scorer = scorer;
+                               base.SetScorer(scorer);
+                       }
+               }
+               
+               /*
+               * Implements a TopFieldCollector over multiple SortField criteria,
+               * tracking document scores but not maxScore, and assuming documents
+               * are collected out of doc Id order.
+               */
+               private sealed class OutOfOrderMultiComparatorScoringNoMaxScoreCollector:MultiComparatorScoringNoMaxScoreCollector
+               {
+                       
+                       public OutOfOrderMultiComparatorScoringNoMaxScoreCollector(FieldValueHitQueue queue, int numHits, bool fillFields):base(queue, numHits, fillFields)
+                       {
+                       }
+                       
+                       public override void  Collect(int doc)
+                       {
+                               ++totalHits;
+                               if (queueFull)
+                               {
+                                       // Fastmatch: return if this hit is not competitive
+                                       for (int i = 0; ; i++)
+                                       {
+                                               int c = reverseMul[i] * comparators[i].CompareBottom(doc);
+                                               if (c < 0)
+                                               {
+                                                       // Definitely not competitive.
+                                                       return ;
+                                               }
+                                               else if (c > 0)
+                                               {
+                                                       // Definitely competitive.
+                                                       break;
+                                               }
+                                               else if (i == comparators.Length - 1)
+                                               {
+                                                       // This is the equals case.
+                                                       if (doc + docBase > bottom.docID)
+                                                       {
+                                                               // Definitely not competitive
+                                                               return ;
+                                                       }
+                                                       break;
+                                               }
+                                       }
+                                       
+                                       // This hit is competitive - replace bottom element in queue & adjustTop
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].Copy(bottom.slot, doc);
+                                       }
+                                       
+                                       // Compute score only if it is competitive.
+                                       float score = scorer.Score();
+                                       UpdateBottom(doc, score);
+                                       
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].SetBottom(bottom.slot);
+                                       }
+                               }
+                               else
+                               {
+                                       // Startup transient: queue hasn't gathered numHits yet
+                                       int slot = totalHits - 1;
+                                       // Copy hit into queue
+                                       for (int i = 0; i < comparators.Length; i++)
+                                       {
+                                               comparators[i].Copy(slot, doc);
+                                       }
+                                       
+                                       // Compute score only if it is competitive.
+                                       float score = scorer.Score();
+                                       Add(slot, doc, score);
+                                       if (queueFull)
+                                       {
+                                               for (int i = 0; i < comparators.Length; i++)
+                                               {
+                                                       comparators[i].SetBottom(bottom.slot);
+                                               }
+                                       }
+                               }
+                       }
+                       
+                       public override void  SetScorer(Scorer scorer)
+                       {
+                               this.scorer = scorer;
+                               base.SetScorer(scorer);
+                       }
+                       
+                       public override bool AcceptsDocsOutOfOrder()
+                       {
+                               return true;
+                       }
+               }
+               
+               private static readonly ScoreDoc[] EMPTY_SCOREDOCS = new ScoreDoc[0];
+               
+               private bool fillFields;
+               
+               /*
+               * Stores the maximum score value encountered, needed for normalizing. If
+               * document scores are not tracked, this value is initialized to NaN.
+               */
+               internal float maxScore = System.Single.NaN;
+               
+               internal int numHits;
+               internal FieldValueHitQueue.Entry bottom = null;
+               internal bool queueFull;
+               internal int docBase;
+               
+               // Declaring the constructor private prevents anyone else from extending this
+               // class. Note that the class cannot be sealed, since it is extended by the
+               // internal subclasses above. If a constructor with any other visibility were
+               // defined, anyone would be able to extend the class, which is not what we
+               // want.
+               private TopFieldCollector(PriorityQueue pq, int numHits, bool fillFields):base(pq)
+               {
+                       this.numHits = numHits;
+                       this.fillFields = fillFields;
+               }
+               
+               /// <summary> Creates a new {@link TopFieldCollector} from the given
+               /// arguments.
+               /// 
+               /// <p/><b>NOTE</b>: The instances returned by this method
+               /// pre-allocate a full array of length
+               /// <code>numHits</code>.
+               /// 
+               /// </summary>
+               /// <param name="sort">the sort criteria (SortFields).
+               /// </param>
+               /// <param name="numHits">the number of results to collect.
+               /// </param>
+               /// <param name="fillFields">specifies whether the actual field values should be returned on
+               /// the results (FieldDoc).
+               /// </param>
+               /// <param name="trackDocScores">specifies whether document scores should be tracked and set on the
+               /// results. Note that if set to false, then the results' scores will
+               /// be set to Float.NaN. Setting this to true affects performance, as
+               /// it incurs the score computation on each competitive result.
+               /// Therefore if document scores are not required by the application,
+               /// it is recommended to set it to false.
+               /// </param>
+               /// <param name="trackMaxScore">specifies whether the query's maxScore should be tracked and set
+               /// on the resulting {@link TopDocs}. Note that if set to false,
+               /// {@link TopDocs#GetMaxScore()} returns Float.NaN. Setting this to
+               /// true affects performance as it incurs the score computation on
+               /// each result. Also, setting this true automatically sets
+               /// <code>trackDocScores</code> to true as well.
+               /// </param>
+               /// <param name="docsScoredInOrder">specifies whether documents are scored in doc Id order or not by
+               /// the given {@link Scorer} in {@link #SetScorer(Scorer)}.
+               /// </param>
+               /// <returns> a {@link TopFieldCollector} instance which will sort the results by
+               /// the sort criteria.
+               /// </returns>
+               /// <throws>  IOException </throws>
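+               /// <example>
+               /// A minimal usage sketch (the "date" field, and the query and searcher
+               /// variables, are hypothetical and not part of this commit):
+               /// <code>
+               /// Sort sort = new Sort(new SortField("date", SortField.LONG, true));
+               /// TopFieldCollector collector = TopFieldCollector.create(sort, 10, true, false, false, true);
+               /// searcher.Search(query, collector);
+               /// TopDocs docs = collector.TopDocs();
+               /// </code>
+               /// </example>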
+               public static TopFieldCollector create(Sort sort, int numHits, bool fillFields, bool trackDocScores, bool trackMaxScore, bool docsScoredInOrder)
+               {
+                       if (sort.fields.Length == 0)
+                       {
+                               throw new System.ArgumentException("Sort must contain at least one field");
+                       }
+                       
+                       FieldValueHitQueue queue = FieldValueHitQueue.Create(sort.fields, numHits);
+                       if (queue.GetComparators().Length == 1)
+                       {
+                               if (docsScoredInOrder)
+                               {
+                                       if (trackMaxScore)
+                                       {
+                                               return new OneComparatorScoringMaxScoreCollector(queue, numHits, fillFields);
+                                       }
+                                       else if (trackDocScores)
+                                       {
+                                               return new OneComparatorScoringNoMaxScoreCollector(queue, numHits, fillFields);
+                                       }
+                                       else
+                                       {
+                                               return new OneComparatorNonScoringCollector(queue, numHits, fillFields);
+                                       }
+                               }
+                               else
+                               {
+                                       if (trackMaxScore)
+                                       {
+                                               return new OutOfOrderOneComparatorScoringMaxScoreCollector(queue, numHits, fillFields);
+                                       }
+                                       else if (trackDocScores)
+                                       {
+                                               return new OutOfOrderOneComparatorScoringNoMaxScoreCollector(queue, numHits, fillFields);
+                                       }
+                                       else
+                                       {
+                                               return new OutOfOrderOneComparatorNonScoringCollector(queue, numHits, fillFields);
+                                       }
+                               }
+                       }
+                       
+                       // multiple comparators.
+                       if (docsScoredInOrder)
+                       {
+                               if (trackMaxScore)
+                               {
+                                       return new MultiComparatorScoringMaxScoreCollector(queue, numHits, fillFields);
+                               }
+                               else if (trackDocScores)
+                               {
+                                       return new MultiComparatorScoringNoMaxScoreCollector(queue, numHits, fillFields);
+                               }
+                               else
+                               {
+                                       return new MultiComparatorNonScoringCollector(queue, numHits, fillFields);
+                               }
+                       }
+                       else
+                       {
+                               if (trackMaxScore)
+                               {
+                                       return new OutOfOrderMultiComparatorScoringMaxScoreCollector(queue, numHits, fillFields);
+                               }
+                               else if (trackDocScores)
+                               {
+                                       return new OutOfOrderMultiComparatorScoringNoMaxScoreCollector(queue, numHits, fillFields);
+                               }
+                               else
+                               {
+                                       return new OutOfOrderMultiComparatorNonScoringCollector(queue, numHits, fillFields);
+                               }
+                       }
+               }
+               
+               internal void  Add(int slot, int doc, float score)
+               {
+                       bottom = (Entry) pq.Add(new Entry(slot, docBase + doc, score));
+                       queueFull = totalHits == numHits;
+               }
+               
+               /*
+               * Only the following callback methods need to be overridden since
+               * topDocs(int, int) calls them to return the results.
+               */
+               
+               protected internal override void  PopulateResults(ScoreDoc[] results, int howMany)
+               {
+                       if (fillFields)
+                       {
+                               // avoid casting if unnecessary.
+                               FieldValueHitQueue queue = (FieldValueHitQueue) pq;
+                               for (int i = howMany - 1; i >= 0; i--)
+                               {
+                                       results[i] = queue.FillFields((Entry) queue.Pop());
+                               }
+                       }
+                       else
+                       {
+                               for (int i = howMany - 1; i >= 0; i--)
+                               {
+                                       Entry entry = (Entry) pq.Pop();
+                                       results[i] = new FieldDoc(entry.docID, entry.score);
+                               }
+                       }
+               }
+               
+               public /*protected internal*/ override TopDocs NewTopDocs(ScoreDoc[] results, int start)
+               {
+                       if (results == null)
+                       {
+                               results = EMPTY_SCOREDOCS;
+                               // Set maxScore to NaN, in case this is a maxScore tracking collector.
+                               maxScore = System.Single.NaN;
+                       }
+                       
+                       // If this is a maxScore tracking collector and there were no results,
+                       // maxScore was set to NaN above.
+                       return new TopFieldDocs(totalHits, results, ((FieldValueHitQueue) pq).GetFields(), maxScore);
+               }
+               
+               public override bool AcceptsDocsOutOfOrder()
+               {
+                       return false;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopFieldDocCollector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopFieldDocCollector.cs
new file mode 100644 (file)
index 0000000..dc55a81
--- /dev/null
@@ -0,0 +1,87 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>A {@link HitCollector} implementation that collects the top-sorting
+       /// documents, returning them as a {@link TopFieldDocs}.  This is used by {@link
+       /// IndexSearcher} to implement {@link TopFieldDocs}-based search.
+       /// 
+       /// <p/>This may be extended, overriding the Collect method to, e.g.,
+       /// conditionally invoke <code>base.Collect()</code> in order to filter which
+       /// documents are collected.
+       /// 
+       /// </summary>
+       /// <deprecated> Please use {@link TopFieldCollector} instead.
+       /// </deprecated>
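+       /// <example>
+       /// A sketch of such a filtering subclass (the class name and filter condition
+       /// are hypothetical, not part of this commit):
+       /// <code>
+       /// class EvenDocsCollector : TopFieldDocCollector
+       /// {
+       ///     public EvenDocsCollector(IndexReader r, Sort s, int n) : base(r, s, n) {}
+       ///     public override void Collect(int doc, float score)
+       ///     {
+       ///         if (doc % 2 == 0) // keep only even doc ids, say
+       ///             base.Collect(doc, score);
+       ///     }
+       /// }
+       /// </code>
+       /// </example>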
+    [Obsolete("Please use TopFieldCollector instead.")]
+       public class TopFieldDocCollector:TopDocCollector
+       {
+               
+               private FieldDoc reusableFD;
+               
+               /// <summary>Construct to collect a given number of hits.</summary>
+               /// <param name="reader">the index to be searched
+               /// </param>
+               /// <param name="sort">the sort criteria
+               /// </param>
+               /// <param name="numHits">the maximum number of hits to collect
+               /// </param>
+               public TopFieldDocCollector(IndexReader reader, Sort sort, int numHits):base(new FieldSortedHitQueue(reader, sort.fields, numHits))
+               {
+               }
+               
+               // javadoc inherited
+               public override void  Collect(int doc, float score)
+               {
+                       if (score > 0.0f)
+                       {
+                               totalHits++;
+                               if (reusableFD == null)
+                                       reusableFD = new FieldDoc(doc, score);
+                               else
+                               {
+                                       // Whereas TopScoreDocCollector can skip this if the
+                                       // score is not competitive, we cannot because the
+                                       // comparators in the FieldSortedHitQueue.lessThan
+                                       // aren't in general congruent with "higher score
+                                       // wins"
+                                       reusableFD.score = score;
+                                       reusableFD.doc = doc;
+                               }
+                               reusableFD = (FieldDoc) hq.InsertWithOverflow(reusableFD);
+                       }
+               }
+               
+               // javadoc inherited
+               public override TopDocs TopDocs()
+               {
+                       FieldSortedHitQueue fshq = (FieldSortedHitQueue) hq;
+                       ScoreDoc[] scoreDocs = new ScoreDoc[fshq.Size()];
+                       // Pop from the queue (least-ranked first), filling the array from the
+                       // end so that the best-ranked doc ends up at index 0.
+                       for (int i = fshq.Size() - 1; i >= 0; i--)
+                               scoreDocs[i] = fshq.FillFields((FieldDoc) fshq.Pop());
+                       
+                       return new TopFieldDocs(totalHits, scoreDocs, fshq.GetFields(), fshq.GetMaxScore());
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopFieldDocs.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopFieldDocs.cs
new file mode 100644 (file)
index 0000000..39f2036
--- /dev/null
@@ -0,0 +1,47 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>
+       /// Represents hits returned by {@link Searcher#search(Query,Filter,int,Sort)}.
+       /// </summary>
+       [Serializable]
+       public class TopFieldDocs:TopDocs
+       {
+               
+               /// <summary>The fields which were used to sort results by. </summary>
+               public SortField[] fields;
+               
+               /// <summary>Creates a new <code>TopFieldDocs</code> instance.</summary>
+               /// <param name="totalHits"> Total number of hits for the query.
+               /// </param>
+               /// <param name="scoreDocs"> The top hits for the query.
+               /// </param>
+               /// <param name="fields">    The sort criteria used to find the top hits.
+               /// </param>
+               /// <param name="maxScore">  The maximum score encountered.
+               /// </param>
+               public TopFieldDocs(int totalHits, ScoreDoc[] scoreDocs, SortField[] fields, float maxScore):base(totalHits, scoreDocs, maxScore)
+               {
+                       this.fields = fields;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopScoreDocCollector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/TopScoreDocCollector.cs
new file mode 100644 (file)
index 0000000..d5d0382
--- /dev/null
@@ -0,0 +1,177 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> A {@link Collector} implementation that collects the top-scoring hits,
+       /// returning them as a {@link TopDocs}. This is used by {@link IndexSearcher} to
+       /// implement {@link TopDocs}-based search. Hits are sorted by score descending
+       /// and then (when the scores are tied) docID ascending. When you create an
+       /// instance of this collector you should know in advance whether documents are
+       /// going to be collected in doc Id order or not.
+       /// 
+       /// <p/><b>NOTE</b>: The values {@link Float#NaN} and
+       /// {@link Float#NEGATIVE_INFINITY} are not valid scores. This
+       /// collector will not properly collect hits with such scores.
+       /// </summary>
+       public abstract class TopScoreDocCollector:TopDocsCollector
+       {
+               
+               // Assumes docs are scored in order.
+               private class InOrderTopScoreDocCollector:TopScoreDocCollector
+               {
+                       internal InOrderTopScoreDocCollector(int numHits):base(numHits)
+                       {
+                       }
+                       
+                       public override void  Collect(int doc)
+                       {
+                               float score = scorer.Score();
+                
+                // This collector cannot handle these scores:
+                System.Diagnostics.Debug.Assert(score != float.NegativeInfinity);
+                System.Diagnostics.Debug.Assert(!float.IsNaN(score));
+
+                               totalHits++;
+                               if (score <= pqTop.score)
+                               {
+                                       // Since docs are returned in-order (i.e., increasing doc Id), a document
+                                       // with equal score to pqTop.score cannot compete since HitQueue favors
+                                       // documents with lower doc Ids. Therefore reject those docs too.
+                                       return ;
+                               }
+                               pqTop.doc = doc + docBase;
+                               pqTop.score = score;
+                               pqTop = (ScoreDoc) pq.UpdateTop();
+                       }
+                       
+                       public override bool AcceptsDocsOutOfOrder()
+                       {
+                               return false;
+                       }
+               }
+               
+               // Assumes docs are scored out of order.
+               private class OutOfOrderTopScoreDocCollector:TopScoreDocCollector
+               {
+                       internal OutOfOrderTopScoreDocCollector(int numHits):base(numHits)
+                       {
+                       }
+                       
+                       public override void  Collect(int doc)
+                       {
+                               float score = scorer.Score();
+
+                // This collector cannot handle NaN
+                System.Diagnostics.Debug.Assert(!float.IsNaN(score));
+
+                               totalHits++;
+                               doc += docBase;
+                               if (score < pqTop.score || (score == pqTop.score && doc > pqTop.doc))
+                               {
+                                       return ;
+                               }
+                               pqTop.doc = doc;
+                               pqTop.score = score;
+                               pqTop = (ScoreDoc) pq.UpdateTop();
+                       }
+                       
+                       public override bool AcceptsDocsOutOfOrder()
+                       {
+                               return true;
+                       }
+               }
+               
+               /// <summary> Creates a new {@link TopScoreDocCollector} given the number of hits to
+               /// collect and whether documents are scored in order by the input
+               /// {@link Scorer} to {@link #SetScorer(Scorer)}.
+               /// 
+               /// <p/><b>NOTE</b>: The instances returned by this method
+               /// pre-allocate a full array of length
+               /// <code>numHits</code>, and fill the array with sentinel
+               /// objects.
+               /// </summary>
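+               /// <example>
+               /// A minimal usage sketch (the query and searcher variables are
+               /// hypothetical, not part of this commit):
+               /// <code>
+               /// TopScoreDocCollector collector = TopScoreDocCollector.create(10, true);
+               /// searcher.Search(query, collector);
+               /// TopDocs docs = collector.TopDocs();
+               /// </code>
+               /// </example>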
+               public static TopScoreDocCollector create(int numHits, bool docsScoredInOrder)
+               {
+                       
+                       if (docsScoredInOrder)
+                       {
+                               return new InOrderTopScoreDocCollector(numHits);
+                       }
+                       else
+                       {
+                               return new OutOfOrderTopScoreDocCollector(numHits);
+                       }
+               }
+               
+               internal ScoreDoc pqTop;
+               internal int docBase = 0;
+               internal Scorer scorer;
+               
+               // Private: prevents instantiation except through the nested subclasses above.
+               private TopScoreDocCollector(int numHits):base(new HitQueue(numHits, true))
+               {
+                       // HitQueue implements getSentinelObject to return a ScoreDoc, so we know
+                       // that at this point top() is already initialized.
+                       pqTop = (ScoreDoc) pq.Top();
+               }
+               
+               public /*protected internal*/ override TopDocs NewTopDocs(ScoreDoc[] results, int start)
+               {
+                       if (results == null)
+                       {
+                               return EMPTY_TOPDOCS;
+                       }
+                       
+                       // We need to compute maxScore in order to set it in TopDocs. If start == 0,
+                       // it means the largest element is already in results, use its score as
+                       // maxScore. Otherwise pop everything else, until the largest element is
+                       // extracted and use its score as maxScore.
+                       float maxScore = System.Single.NaN;
+                       if (start == 0)
+                       {
+                               maxScore = results[0].score;
+                       }
+                       else
+                       {
+                               for (int i = pq.Size(); i > 1; i--)
+                               {
+                                       pq.Pop();
+                               }
+                               maxScore = ((ScoreDoc) pq.Pop()).score;
+                       }
+                       
+                       return new TopDocs(totalHits, results, maxScore);
+               }
+               
+               public override void  SetNextReader(IndexReader reader, int base_Renamed)
+               {
+                       docBase = base_Renamed;
+               }
+               
+               public override void  SetScorer(Scorer scorer)
+               {
+                       this.scorer = scorer;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Weight.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/Weight.cs
new file mode 100644 (file)
index 0000000..1f800ac
--- /dev/null
@@ -0,0 +1,125 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Expert: Calculate query weights and build query scorers.
+       /// <p/>
+       /// The purpose of {@link Weight} is to ensure searching does not
+       /// modify a {@link Query}, so that a {@link Query} instance can be reused. <br/>
+       /// {@link Searcher} dependent state of the query should reside in the
+       /// {@link Weight}. <br/>
+       /// {@link IndexReader} dependent state should reside in the {@link Scorer}.
+       /// <p/>
+       /// A <code>Weight</code> is used in the following way:
+       /// <ol>
+       /// <li>A <code>Weight</code> is constructed by a top-level query, given a
+       /// <code>Searcher</code> ({@link Query#CreateWeight(Searcher)}).</li>
+       /// <li>The {@link #SumOfSquaredWeights()} method is called on the
+       /// <code>Weight</code> to compute the query normalization factor
+       /// {@link Similarity#QueryNorm(float)} of the query clauses contained in the
+       /// query.</li>
+       /// <li>The query normalization factor is passed to {@link #Normalize(float)}. At
+       /// this point the weighting is complete.</li>
+       /// <li>A <code>Scorer</code> is constructed by {@link #Scorer(IndexReader,boolean,boolean)}.</li>
+       /// </ol>
+       /// 
+       /// </summary>
+       /// <since> 2.9
+       /// </since>
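+       /// <example>
+       /// A sketch of the four steps above (the query, searcher and reader variables
+       /// are hypothetical, and CreateWeight is assumed to be accessible from the
+       /// call site):
+       /// <code>
+       /// Weight weight = query.CreateWeight(searcher);
+       /// float sum = weight.SumOfSquaredWeights();
+       /// weight.Normalize(searcher.GetSimilarity().QueryNorm(sum));
+       /// Scorer scorer = weight.Scorer(reader, true, false);
+       /// </code>
+       /// </example>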
+       [Serializable]
+       public abstract class Weight
+       {
+               
+               /// <summary> An explanation of the score computation for the named document.
+               /// 
+               /// </summary>
+               /// <param name="reader">sub-reader containing the give doc
+               /// </param>
+               /// <param name="doc">
+               /// </param>
+               /// <returns> an Explanation for the score
+               /// </returns>
+               /// <throws>  IOException </throws>
+               public abstract Explanation Explain(IndexReader reader, int doc);
+               
+               /// <summary>The query that this concerns. </summary>
+               public abstract Query GetQuery();
+               
+               /// <summary>The weight for this query. </summary>
+               public abstract float GetValue();
+               
+               /// <summary>Assigns the query normalization factor to this. </summary>
+               public abstract void  Normalize(float norm);
+               
+               /// <summary> Returns a {@link Scorer} which scores documents in/out-of order according
+               /// to <code>scoreDocsInOrder</code>.
+               /// <p/>
+               /// <b>NOTE:</b> even if <code>scoreDocsInOrder</code> is false, it is
+               /// recommended to check whether the returned <code>Scorer</code> indeed scores
+               /// documents out of order (i.e., call {@link #ScoresDocsOutOfOrder()}), as
+               /// some <code>Scorer</code> implementations will always return documents
+               /// in-order.<br/>
+               /// <b>NOTE:</b> null can be returned if no documents will be scored by this
+               /// query.
+               /// 
+               /// </summary>
+               /// <param name="reader">
+               /// the {@link IndexReader} for which to return the {@link Scorer}.
+               /// </param>
+               /// <param name="scoreDocsInOrder">specifies whether in-order scoring of documents is required. Note
+               /// that if set to false (i.e., out-of-order scoring is required),
+               /// this method can return whatever scoring mode it supports, as every
+               /// in-order scorer is also an out-of-order one. However, an
+               /// out-of-order scorer may not support {@link Scorer#NextDoc()}
+               /// and/or {@link Scorer#Advance(int)}, therefore it is recommended to
+               /// request an in-order scorer if use of these methods is required.
+               /// </param>
+               /// <param name="topScorer">
+               /// if true, {@link Scorer#Score(Collector)} will be called; if false,
+               /// {@link Scorer#NextDoc()} and/or {@link Scorer#Advance(int)} will
+               /// be called.
+               /// </param>
+               /// <returns> a {@link Scorer} which scores documents in/out-of order.
+               /// </returns>
+               /// <throws>  IOException </throws>
+               public abstract Scorer Scorer(IndexReader reader, bool scoreDocsInOrder, bool topScorer);
+               
+               /// <summary>The sum of squared weights of contained query clauses. </summary>
+               public abstract float SumOfSquaredWeights();
+               
+               /// <summary> Returns true iff this implementation scores docs only out of order. This
+               /// method is used in conjunction with {@link Collector}'s
+               /// {@link Collector#AcceptsDocsOutOfOrder() acceptsDocsOutOfOrder} and
+               /// {@link #Scorer(Mono.Lucene.Net.Index.IndexReader, boolean, boolean)} to
+               /// create a matching {@link Scorer} instance for a given {@link Collector}, or
+               /// vice versa.
+               /// <p/>
+               /// <b>NOTE:</b> the default implementation returns <code>false</code>, i.e.
+               /// the <code>Scorer</code> scores documents in-order.
+               /// </summary>
+               public virtual bool ScoresDocsOutOfOrder()
+               {
+                       return false;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/WildcardQuery.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/WildcardQuery.cs
new file mode 100644 (file)
index 0000000..4506925
--- /dev/null
@@ -0,0 +1,116 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+using ToStringUtils = Mono.Lucene.Net.Util.ToStringUtils;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary>Implements the wildcard search query. Supported wildcards are <code>*</code>, which
+       /// matches any character sequence (including the empty one), and <code>?</code>,
+       /// which matches any single character. Note this query can be slow, as it
+       /// needs to iterate over many terms. In order to prevent extremely slow WildcardQueries,
+       /// a Wildcard term should not start with one of the wildcards <code>*</code> or
+       /// <code>?</code>.
+       /// 
+       /// <p/>This query uses the {@link
+       /// MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}
+       /// rewrite method.
+       /// 
+       /// </summary>
+       /// <seealso cref="WildcardTermEnum">
+       /// </seealso>
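+       /// <example>
+       /// For instance (the field name and pattern are hypothetical):
+       /// <code>
+       /// Query query = new WildcardQuery(new Term("body", "te?t*"));
+       /// </code>
+       /// </example>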
+       [Serializable]
+       public class WildcardQuery:MultiTermQuery
+       {
+               private bool termContainsWildcard;
+               new protected internal Term term;
+               
+               public WildcardQuery(Term term):base(term)
+               { //will be removed in 3.0
+                       this.term = term;
+                       this.termContainsWildcard = (term.Text().IndexOf('*') != - 1) || (term.Text().IndexOf('?') != - 1);
+               }
+               
+               public /*protected internal*/ override FilteredTermEnum GetEnum(IndexReader reader)
+               {
+                       return new WildcardTermEnum(reader, GetTerm());
+               }
+               
+               /// <summary> Returns the pattern term.</summary>
+        [Obsolete("Mono.Lucene.Net-2.9.1. This method overrides obsolete member Mono.Lucene.Net.Search.MultiTermQuery.GetTerm()")]
+               public override Term GetTerm()
+               {
+                       return term;
+               }
+               
+               public override Query Rewrite(IndexReader reader)
+               {
+                       if (!termContainsWildcard)
+                               return new TermQuery(GetTerm());
+                       else
+                               return base.Rewrite(reader);
+               }
+               
+               /// <summary>Prints a user-readable version of this query. </summary>
+               public override System.String ToString(System.String field)
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       if (!term.Field().Equals(field))
+                       {
+                               buffer.Append(term.Field());
+                               buffer.Append(":");
+                       }
+                       buffer.Append(term.Text());
+                       buffer.Append(ToStringUtils.Boost(GetBoost()));
+                       return buffer.ToString();
+               }
+               
+               //@Override
+               public override int GetHashCode()
+               {
+                       int prime = 31;
+                       int result = base.GetHashCode();
+                       result = prime * result + ((term == null)?0:term.GetHashCode());
+                       return result;
+               }
+               
+               //@Override
+               public  override bool Equals(System.Object obj)
+               {
+                       if (this == obj)
+                               return true;
+                       if (!base.Equals(obj))
+                               return false;
+                       if (GetType() != obj.GetType())
+                               return false;
+                       WildcardQuery other = (WildcardQuery) obj;
+                       if (term == null)
+                       {
+                               if (other.term != null)
+                                       return false;
+                       }
+                       else if (!term.Equals(other.term))
+                               return false;
+                       return true;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/WildcardTermEnum.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Search/WildcardTermEnum.cs
new file mode 100644 (file)
index 0000000..bd481c5
--- /dev/null
@@ -0,0 +1,199 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using Term = Mono.Lucene.Net.Index.Term;
+
+namespace Mono.Lucene.Net.Search
+{
+       
+       /// <summary> Subclass of FilteredTermEnum for enumerating all terms that match the
+       /// specified wildcard filter term.
+       /// <p/>
+       /// Term enumerations are always ordered by Term.compareTo().  Each term in
+       /// the enumeration is greater than all that precede it.
+       /// 
+       /// </summary>
+       /// <version>  $Id: WildcardTermEnum.java 783371 2009-06-10 14:39:56Z mikemccand $
+       /// </version>
+       public class WildcardTermEnum:FilteredTermEnum
+       {
+               internal Term searchTerm;
+               internal System.String field;
+               internal System.String text;
+               internal System.String pre;
+               internal int preLen;
+               internal bool endEnum = false;
+               
+               /// <summary> Creates a new <code>WildcardTermEnum</code>.
+               /// <p/>
+               /// After calling the constructor the enumeration is already pointing to the first 
+               /// valid term if such a term exists.
+               /// </summary>
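+               /// <example>
+               /// A usage sketch (the reader variable is hypothetical, not part of
+               /// this commit):
+               /// <code>
+               /// WildcardTermEnum terms = new WildcardTermEnum(reader, new Term("body", "wild*"));
+               /// do
+               /// {
+               ///     Term t = terms.Term();
+               ///     if (t == null) break;
+               ///     // ... use t ...
+               /// } while (terms.Next());
+               /// terms.Close();
+               /// </code>
+               /// </example>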
+               public WildcardTermEnum(IndexReader reader, Term term):base()
+               {
+                       searchTerm = term;
+                       field = searchTerm.Field();
+                       System.String searchTermText = searchTerm.Text();
+                       
+                       int sidx = searchTermText.IndexOf((System.Char) WILDCARD_STRING);
+                       int cidx = searchTermText.IndexOf((System.Char) WILDCARD_CHAR);
+                       int idx = sidx;
+                       if (idx == - 1)
+                       {
+                               idx = cidx;
+                       }
+                       else if (cidx >= 0)
+                       {
+                               idx = System.Math.Min(idx, cidx);
+                       }
+                       pre = idx != - 1 ? searchTermText.Substring(0, idx) : "";
+                       
+                       preLen = pre.Length;
+                       text = searchTermText.Substring(preLen);
+                       SetEnum(reader.Terms(new Term(searchTerm.Field(), pre)));
+               }
+               
+               public /*protected internal*/ override bool TermCompare(Term term)
+               {
+                       if ((System.Object) field == (System.Object) term.Field())
+                       {
+                               System.String searchText = term.Text();
+                               if (searchText.StartsWith(pre))
+                               {
+                                       return WildcardEquals(text, 0, searchText, preLen);
+                               }
+                       }
+                       endEnum = true;
+                       return false;
+               }
+               
+               public override float Difference()
+               {
+                       return 1.0f;
+               }
+               
+               public override bool EndEnum()
+               {
+                       return endEnum;
+               }
+               
+               /// <summary>String equality with support for wildcards.</summary>
+               
+               public const char WILDCARD_STRING = '*';
+               public const char WILDCARD_CHAR = '?';
+               
+               /// <summary> Determines if a word matches a wildcard pattern.
+               /// <small>Work released by Granta Design Ltd after originally being done on
+               /// company time.</small>
+               /// </summary>
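+               /// <example>
+               /// For instance:
+               /// <code>
+               /// WildcardTermEnum.WildcardEquals("te?t", 0, "test", 0); // true
+               /// WildcardTermEnum.WildcardEquals("te*", 0, "te", 0);   // true: '*' can match the empty sequence
+               /// WildcardTermEnum.WildcardEquals("ca??", 0, "cat", 0); // false: a trailing '?' must consume a character
+               /// </code>
+               /// </example>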
+               public static bool WildcardEquals(System.String pattern, int patternIdx, System.String string_Renamed, int stringIdx)
+               {
+                       int p = patternIdx;
+                       
+                       for (int s = stringIdx; ; ++p, ++s)
+                       {
+                               // End of string yet?
+                               bool sEnd = (s >= string_Renamed.Length);
+                               // End of pattern yet?
+                               bool pEnd = (p >= pattern.Length);
+                               
+                               // If we're looking at the end of the string...
+                               if (sEnd)
+                               {
+                                       // Assume that only wildcards are left in the pattern
+                                       bool justWildcardsLeft = true;
+                                       
+                                       // Current wildcard position
+                                       int wildcardSearchPos = p;
+                                       // While we haven't found the end of the pattern,
+                                       // and haven't encountered any non-wildcard characters
+                                       while (wildcardSearchPos < pattern.Length && justWildcardsLeft)
+                                       {
+                                               // Check the character at the current position
+                                               char wildchar = pattern[wildcardSearchPos];
+                                               
+                                               // If it's not a wildcard character, then there is more
+                                               // pattern information after this/these wildcards.
+                                               if (wildchar != WILDCARD_CHAR && wildchar != WILDCARD_STRING)
+                                               {
+                                                       justWildcardsLeft = false;
+                                               }
+                                               else
+                                               {
+                                                       // to prevent "cat" from matching "ca??"
+                                                       if (wildchar == WILDCARD_CHAR)
+                                                       {
+                                                               return false;
+                                                       }
+                                                       
+                                                       // Look at the next character
+                                                       wildcardSearchPos++;
+                                               }
+                                       }
+                                       
+                                       // Only wildcard characters were left in the pattern, so
+                                       // the string matches; return true.
+                                       if (justWildcardsLeft)
+                                       {
+                                               return true;
+                                       }
+                               }
+                               
+                               // If we've gone past the end of the string, or the pattern,
+                               // return false.
+                               if (sEnd || pEnd)
+                               {
+                                       break;
+                               }
+                               
+                               // Match a single character, so continue.
+                               if (pattern[p] == WILDCARD_CHAR)
+                               {
+                                       continue;
+                               }
+                               
+                               // Match zero or more characters.
+                               if (pattern[p] == WILDCARD_STRING)
+                               {
+                                       // Look at the character beyond the '*' characters.
+                                       while (p < pattern.Length && pattern[p] == WILDCARD_STRING)
+                                               ++p;
+                                       // Examine the string, starting at the last character.
+                                       for (int i = string_Renamed.Length; i >= s; --i)
+                                       {
+                                               if (WildcardEquals(pattern, p, string_Renamed, i))
+                                               {
+                                                       return true;
+                                               }
+                                       }
+                                       break;
+                               }
+                               if (pattern[p] != string_Renamed[s])
+                               {
+                                       break;
+                               }
+                       }
+                       return false;
+               }
+       }
+}
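
The matcher above advances pattern and string in lock-step and recurses on '*' to try every possible tail. As a minimal standalone sketch of the same rules (not part of this commit; Wildcard and Matches are invented names, the real entry point is the WildcardEquals method shown above):

// Illustrative re-implementation of the '?'/'*' matching rules above.
static class Wildcard
{
    const char One  = '?';  // matches exactly one character
    const char Many = '*';  // matches zero or more characters

    public static bool Matches(string pattern, int p, string s, int i)
    {
        for (; ; ++p, ++i)
        {
            if (i >= s.Length)
            {
                // Only '*' may remain: a trailing '?' still demands one
                // more character, so "ca??" must not match "cat".
                while (p < pattern.Length && pattern[p] == Many)
                    ++p;
                return p == pattern.Length;
            }
            if (p >= pattern.Length)
                return false;
            if (pattern[p] == One)
                continue;
            if (pattern[p] == Many)
            {
                while (p < pattern.Length && pattern[p] == Many)
                    ++p;
                // Bind '*' to every possible suffix, longest first.
                for (int j = s.Length; j >= i; --j)
                    if (Matches(pattern, p, s, j))
                        return true;
                return false;
            }
            if (pattern[p] != s[i])
                return false;
        }
    }
}

For example, Wildcard.Matches("te*t?", 0, "tests", 0) returns true, while Wildcard.Matches("ca??", 0, "cat", 0) returns false because the trailing '?' still requires a character.
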
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/.gitattributes b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/.gitattributes
new file mode 100644 (file)
index 0000000..30c0fc9
--- /dev/null
@@ -0,0 +1,31 @@
+/AlreadyClosedException.cs -crlf
+/BufferedIndexInput.cs -crlf
+/BufferedIndexOutput.cs -crlf
+/CheckSumIndexInput.cs -crlf
+/CheckSumIndexOutput.cs -crlf
+/Directory.cs -crlf
+/FSDirectory.cs -crlf
+/FSLockFactory.cs -crlf
+/FileSwitchDirectory.cs -crlf
+/IndexInput.cs -crlf
+/IndexOutput.cs -crlf
+/Lock.cs -crlf
+/LockFactory.cs -crlf
+/LockObtainFailedException.cs -crlf
+/LockReleaseFailedException.cs -crlf
+/LockStressTest.cs -crlf
+/LockVerifyServer.cs -crlf
+/MMapDirectory.cs -crlf
+/NIOFSDirectory.cs -crlf
+/NativeFSLockFactory.cs -crlf
+/NoLockFactory.cs -crlf
+/NoSuchDirectoryException.cs -crlf
+/Package.html -crlf
+/RAMDirectory.cs -crlf
+/RAMFile.cs -crlf
+/RAMInputStream.cs -crlf
+/RAMOutputStream.cs -crlf
+/SimpleFSDirectory.cs -crlf
+/SimpleFSLockFactory.cs -crlf
+/SingleInstanceLockFactory.cs -crlf
+/VerifyingLockFactory.cs -crlf
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/AlreadyClosedException.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/AlreadyClosedException.cs
new file mode 100644 (file)
index 0000000..58a94ca
--- /dev/null
@@ -0,0 +1,38 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using System.Runtime.Serialization;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> This exception is thrown when there is an attempt to
+       /// access something that has already been closed.
+       /// </summary>
+       [Serializable]
+       public class AlreadyClosedException:System.SystemException
+       {
+               public AlreadyClosedException(System.String message):base(message)
+               {
+               }
+
+        protected AlreadyClosedException(SerializationInfo info, StreamingContext context) : base(info, context)
+        {
+        }
+       }
+}
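
AlreadyClosedException backs the use-after-close guard used throughout the Store layer (compare Directory.EnsureOpen later in this diff). A minimal sketch of that guard pattern, with an invented Resource class and a plain exception standing in for the real type:

using System;

class Resource
{
    volatile bool isOpen = true;

    public void Close()
    {
        isOpen = false;
    }

    // Every public operation checks the flag first, so use-after-close
    // fails fast with a descriptive exception instead of corrupting state.
    void EnsureOpen()
    {
        if (!isOpen)
            throw new InvalidOperationException("this Resource is closed");
    }

    public void DoWork()
    {
        EnsureOpen();
        // ... actual work ...
    }
}
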
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/BufferedIndexInput.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/BufferedIndexInput.cs
new file mode 100644 (file)
index 0000000..8c25d6b
--- /dev/null
@@ -0,0 +1,241 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary>Base implementation class for buffered {@link IndexInput}. </summary>
+       public abstract class BufferedIndexInput:IndexInput, System.ICloneable
+       {
+               
+               /// <summary>Default buffer size </summary>
+               public const int BUFFER_SIZE = 1024;
+               
+               private int bufferSize = BUFFER_SIZE;
+               
+               protected internal byte[] buffer;
+               
+               private long bufferStart = 0; // position in file of buffer
+               private int bufferLength = 0; // end of valid bytes
+               private int bufferPosition = 0; // next byte to read
+               
+               public override byte ReadByte()
+               {
+                       if (bufferPosition >= bufferLength)
+                               Refill();
+                       return buffer[bufferPosition++];
+               }
+               
+               public BufferedIndexInput()
+               {
+               }
+               
+               /// <summary>Initializes BufferedIndexInput with a specific bufferSize </summary>
+               public BufferedIndexInput(int bufferSize)
+               {
+                       CheckBufferSize(bufferSize);
+                       this.bufferSize = bufferSize;
+               }
+               
+               /// <summary>Change the buffer size used by this IndexInput </summary>
+               public virtual void  SetBufferSize(int newSize)
+               {
+                       System.Diagnostics.Debug.Assert(buffer == null || bufferSize == buffer.Length, "buffer=" + buffer + " bufferSize=" + bufferSize + " buffer.length=" +(buffer != null ? buffer.Length: 0));
+                       if (newSize != bufferSize)
+                       {
+                               CheckBufferSize(newSize);
+                               bufferSize = newSize;
+                               if (buffer != null)
+                               {
+                                       // Resize the existing buffer and carefully save as
+                                       // many bytes as possible starting from the current
+                                       // bufferPosition
+                                       byte[] newBuffer = new byte[newSize];
+                                       int leftInBuffer = bufferLength - bufferPosition;
+                                       int numToCopy;
+                                       if (leftInBuffer > newSize)
+                                               numToCopy = newSize;
+                                       else
+                                               numToCopy = leftInBuffer;
+                                       Array.Copy(buffer, bufferPosition, newBuffer, 0, numToCopy);
+                                       bufferStart += bufferPosition;
+                                       bufferPosition = 0;
+                                       bufferLength = numToCopy;
+                                       NewBuffer(newBuffer);
+                               }
+                       }
+               }
+               
+               protected internal virtual void  NewBuffer(byte[] newBuffer)
+               {
+                       // Subclasses can do something here
+                       buffer = newBuffer;
+               }
+               
+               /// <seealso cref="SetBufferSize">
+               /// </seealso>
+               public virtual int GetBufferSize()
+               {
+                       return bufferSize;
+               }
+               
+               private void  CheckBufferSize(int bufferSize)
+               {
+                       if (bufferSize <= 0)
+                               throw new System.ArgumentException("bufferSize must be greater than 0 (got " + bufferSize + ")");
+               }
+               
+               public override void  ReadBytes(byte[] b, int offset, int len)
+               {
+                       ReadBytes(b, offset, len, true);
+               }
+               
+               public override void  ReadBytes(byte[] b, int offset, int len, bool useBuffer)
+               {
+                       
+                       if (len <= (bufferLength - bufferPosition))
+                       {
+                               // the buffer contains enough data to satisfy this request
+                               // len == 0 is allowed even when b is null...
+                               if (len > 0)
+                                       Array.Copy(buffer, bufferPosition, b, offset, len);
+                               bufferPosition += len;
+                       }
+                       else
+                       {
+                               // the buffer does not have enough data. First serve all we've got.
+                               int available = bufferLength - bufferPosition;
+                               if (available > 0)
+                               {
+                                       Array.Copy(buffer, bufferPosition, b, offset, available);
+                                       offset += available;
+                                       len -= available;
+                                       bufferPosition += available;
+                               }
+                               // and now, read the remaining 'len' bytes:
+                               if (useBuffer && len < bufferSize)
+                               {
+                                       // If the amount left to read is small enough, and
+                                       // we are allowed to use our buffer, do it in the usual
+                                       // buffered way: fill the buffer and copy from it:
+                                       Refill();
+                                       if (bufferLength < len)
+                                       {
+                                               // Throw an exception when refill() could not read len bytes:
+                                               Array.Copy(buffer, 0, b, offset, bufferLength);
+                                               throw new System.IO.IOException("read past EOF");
+                                       }
+                                       else
+                                       {
+                                               Array.Copy(buffer, 0, b, offset, len);
+                                               bufferPosition = len;
+                                       }
+                               }
+                               else
+                               {
+                                       // The amount left to read is larger than the buffer
+                                       // or we've been asked to not use our buffer -
+                                       // there's no performance reason not to read it all
+                                       // at once. Note that unlike the previous code of
+                                       // this function, there is no need to do a seek
+                                       // here, because there's no need to reread what we
+                                       // had in the buffer.
+                                       long after = bufferStart + bufferPosition + len;
+                                       if (after > Length())
+                                               throw new System.IO.IOException("read past EOF");
+                                       ReadInternal(b, offset, len);
+                                       bufferStart = after;
+                                       bufferPosition = 0;
+                                       bufferLength = 0; // trigger refill() on read
+                               }
+                       }
+               }
+               
+               private void  Refill()
+               {
+                       long start = bufferStart + bufferPosition;
+                       long end = start + bufferSize;
+                       if (end > Length())
+                               end = Length(); // don't read past EOF
+                       int newLength = (int) (end - start);
+                       if (newLength <= 0)
+                               throw new System.IO.IOException("read past EOF");
+                       
+                       if (buffer == null)
+                       {
+                               NewBuffer(new byte[bufferSize]); // allocate buffer lazily
+                               SeekInternal(bufferStart);
+                       }
+                       ReadInternal(buffer, 0, newLength);
+                       bufferLength = newLength;
+                       bufferStart = start;
+                       bufferPosition = 0;
+               }
+               
+               /// <summary>Expert: implements buffer refill.  Reads bytes from the current position
+               /// in the input.
+               /// </summary>
+               /// <param name="b">the array to read bytes into
+               /// </param>
+               /// <param name="offset">the offset in the array to start storing bytes
+               /// </param>
+               /// <param name="length">the number of bytes to read
+               /// </param>
+               public abstract void  ReadInternal(byte[] b, int offset, int length);
+               
+               public override long GetFilePointer()
+               {
+                       return bufferStart + bufferPosition;
+               }
+               
+               public override void  Seek(long pos)
+               {
+                       if (pos >= bufferStart && pos < (bufferStart + bufferLength))
+                               bufferPosition = (int) (pos - bufferStart); // seek within buffer
+                       else
+                       {
+                               bufferStart = pos;
+                               bufferPosition = 0;
+                               bufferLength = 0; // trigger refill() on read()
+                               SeekInternal(pos);
+                       }
+               }
+               
+               /// <summary>Expert: implements seek.  Sets current position in this file, where the
+               /// next {@link #ReadInternal(byte[],int,int)} will occur.
+               /// </summary>
+               /// <seealso cref="ReadInternal(byte[],int,int)">
+               /// </seealso>
+               public abstract void  SeekInternal(long pos);
+               
+               public override System.Object Clone()
+               {
+                       BufferedIndexInput clone = (BufferedIndexInput) base.Clone();
+                       
+                       clone.buffer = null;
+                       clone.bufferLength = 0;
+                       clone.bufferPosition = 0;
+                       clone.bufferStart = GetFilePointer();
+                       
+                       return clone;
+               }
+       }
+}
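
Concrete subclasses only have to supply ReadInternal, SeekInternal and Length; everything else above is buffer bookkeeping. Purely as an illustration of the refill pattern, not part of this commit, the same idea over a plain System.IO.Stream might look like this (class and member names are invented):

using System.IO;

// Minimal sketch of the BufferedIndexInput refill pattern over a Stream.
class StreamBufferedReader
{
    readonly Stream input;
    readonly byte[] buffer;
    long bufferStart;   // file position of buffer[0]
    int bufferLength;   // valid bytes in buffer
    int bufferPosition; // next byte to hand out

    public StreamBufferedReader(Stream input, int bufferSize = 1024)
    {
        this.input = input;
        buffer = new byte[bufferSize];
    }

    public byte ReadByte()
    {
        if (bufferPosition >= bufferLength)
            Refill();
        return buffer[bufferPosition++];
    }

    void Refill()
    {
        long start = bufferStart + bufferPosition;
        input.Seek(start, SeekOrigin.Begin);
        bufferLength = input.Read(buffer, 0, buffer.Length);
        if (bufferLength <= 0)
            throw new IOException("read past EOF");
        bufferStart = start;
        bufferPosition = 0;
    }
}
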
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/BufferedIndexOutput.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/BufferedIndexOutput.cs
new file mode 100644 (file)
index 0000000..04c6bf5
--- /dev/null
@@ -0,0 +1,156 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary>Base implementation class for buffered {@link IndexOutput}. </summary>
+       public abstract class BufferedIndexOutput:IndexOutput
+       {
+               internal const int BUFFER_SIZE = 16384;
+               
+               private byte[] buffer = new byte[BUFFER_SIZE];
+               private long bufferStart = 0; // position in file of buffer
+               private int bufferPosition = 0; // position in buffer
+               
+               /// <summary>Writes a single byte.</summary>
+               /// <seealso cref="IndexInput.ReadByte()">
+               /// </seealso>
+               public override void  WriteByte(byte b)
+               {
+                       if (bufferPosition >= BUFFER_SIZE)
+                               Flush();
+                       buffer[bufferPosition++] = b;
+               }
+               
+               /// <summary>Writes an array of bytes.</summary>
+               /// <param name="b">the bytes to write
+               /// </param>
+               /// <param name="length">the number of bytes to write
+               /// </param>
+               /// <seealso cref="IndexInput.ReadBytes(byte[],int,int)">
+               /// </seealso>
+               public override void  WriteBytes(byte[] b, int offset, int length)
+               {
+                       int bytesLeft = BUFFER_SIZE - bufferPosition;
+                       // is there enough space in the buffer?
+                       if (bytesLeft >= length)
+                       {
+                               // we add the data to the end of the buffer
+                               Array.Copy(b, offset, buffer, bufferPosition, length);
+                               bufferPosition += length;
+                               // if the buffer is full, flush it
+                               if (BUFFER_SIZE - bufferPosition == 0)
+                                       Flush();
+                       }
+                       else
+                       {
+                               // is the data larger than the buffer?
+                               if (length > BUFFER_SIZE)
+                               {
+                                       // we flush the buffer
+                                       if (bufferPosition > 0)
+                                               Flush();
+                                       // and write data at once
+                                       FlushBuffer(b, offset, length);
+                                       bufferStart += length;
+                               }
+                               else
+                               {
+                                       // we fill/flush the buffer (until the input is written)
+                                       int pos = 0; // position in the input data
+                                       int pieceLength;
+                                       while (pos < length)
+                                       {
+                                               pieceLength = (length - pos < bytesLeft) ? length - pos : bytesLeft;
+                                               Array.Copy(b, pos + offset, buffer, bufferPosition, pieceLength);
+                                               pos += pieceLength;
+                                               bufferPosition += pieceLength;
+                                               // if the buffer is full, flush it
+                                               bytesLeft = BUFFER_SIZE - bufferPosition;
+                                               if (bytesLeft == 0)
+                                               {
+                                                       Flush();
+                                                       bytesLeft = BUFFER_SIZE;
+                                               }
+                                       }
+                               }
+                       }
+               }
+               
+               /// <summary>Forces any buffered output to be written. </summary>
+               public override void  Flush()
+               {
+                       FlushBuffer(buffer, bufferPosition);
+                       bufferStart += bufferPosition;
+                       bufferPosition = 0;
+               }
+               
+               /// <summary>Expert: implements buffer write.  Writes bytes at the current position in
+               /// the output.
+               /// </summary>
+               /// <param name="b">the bytes to write
+               /// </param>
+               /// <param name="len">the number of bytes to write
+               /// </param>
+               private void  FlushBuffer(byte[] b, int len)
+               {
+                       FlushBuffer(b, 0, len);
+               }
+               
+               /// <summary>Expert: implements buffer write.  Writes bytes at the current position in
+               /// the output.
+               /// </summary>
+               /// <param name="b">the bytes to write
+               /// </param>
+               /// <param name="offset">the offset in the byte array
+               /// </param>
+               /// <param name="len">the number of bytes to write
+               /// </param>
+               public abstract void  FlushBuffer(byte[] b, int offset, int len);
+               
+               /// <summary>Closes this stream to further operations. </summary>
+               public override void  Close()
+               {
+                       Flush();
+               }
+               
+               /// <summary>Returns the current position in this file, where the next write will
+               /// occur.
+               /// </summary>
+               /// <seealso cref="Seek(long)">
+               /// </seealso>
+               public override long GetFilePointer()
+               {
+                       return bufferStart + bufferPosition;
+               }
+               
+               /// <summary>Sets current position in this file, where the next write will occur.</summary>
+               /// <seealso cref="GetFilePointer()">
+               /// </seealso>
+               public override void  Seek(long pos)
+               {
+                       Flush();
+                       bufferStart = pos;
+               }
+               
+               /// <summary>The number of bytes in the file. </summary>
+               public abstract override long Length();
+       }
+}
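
The write path is symmetric to BufferedIndexInput: small writes accumulate in a fixed buffer that is pushed down only when full, and writes larger than the buffer bypass it entirely. A simplified standalone sketch of that logic (names invented; Stream.Write plays the role of FlushBuffer, and the byte-at-a-time loop stands in for the chunked copy the real WriteBytes performs):

using System.IO;

class StreamBufferedWriter
{
    readonly Stream output;
    readonly byte[] buffer = new byte[16384];
    int bufferPosition;

    public StreamBufferedWriter(Stream output)
    {
        this.output = output;
    }

    public void WriteByte(byte b)
    {
        if (bufferPosition >= buffer.Length)
            Flush();
        buffer[bufferPosition++] = b;
    }

    public void Write(byte[] b, int offset, int length)
    {
        // Large writes skip the buffer, as in WriteBytes above.
        if (length > buffer.Length)
        {
            Flush();
            output.Write(b, offset, length);
            return;
        }
        for (int i = 0; i < length; i++)
            WriteByte(b[offset + i]);
    }

    public void Flush()
    {
        output.Write(buffer, 0, bufferPosition);
        bufferPosition = 0;
    }
}
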
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/CheckSumIndexInput.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/CheckSumIndexInput.cs
new file mode 100644 (file)
index 0000000..7decafd
--- /dev/null
@@ -0,0 +1,83 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary>Reads bytes through to a primary IndexInput, computing
+       /// a checksum as it goes. Note that you cannot use seek(). 
+       /// </summary>
+       public class ChecksumIndexInput:IndexInput
+       {
+               internal IndexInput main;
+               internal SupportClass.Checksum digest;
+               
+               public ChecksumIndexInput(IndexInput main)
+               {
+                       this.main = main;
+                       digest = new SupportClass.CRC32();
+               }
+               
+               public override byte ReadByte()
+               {
+                       byte b = main.ReadByte();
+                       digest.Update(b);
+                       return b;
+               }
+               
+               public override void  ReadBytes(byte[] b, int offset, int len)
+               {
+                       main.ReadBytes(b, offset, len);
+                       digest.Update(b, offset, len);
+               }
+               
+               public virtual long GetChecksum()
+               {
+                       return digest.GetValue();
+               }
+               
+               public override void  Close()
+               {
+                       main.Close();
+               }
+               
+               public override long GetFilePointer()
+               {
+                       return main.GetFilePointer();
+               }
+               
+               public override void  Seek(long pos)
+               {
+                       throw new System.SystemException("not allowed");
+               }
+               
+               public override long Length()
+               {
+                       return main.Length();
+               }
+
+        /*
+               override public System.Object Clone()
+               {
+                       return null;
+               }
+        */
+       }
+}
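
ChecksumIndexInput is a plain decorator: every byte handed to the caller is also fed to the running digest, so after streaming a file's payload the caller can compare GetChecksum() with the long stored at the end. A standalone sketch of the decorator shape (names invented; a toy rolling hash stands in for SupportClass.CRC32):

using System.IO;

// Decorator mirroring ChecksumIndexInput: reads pass through to the
// wrapped stream while a running digest is updated on the side.
class ChecksumReader
{
    readonly Stream main;
    uint digest;  // toy rolling hash in place of a real CRC32

    public ChecksumReader(Stream main)
    {
        this.main = main;
    }

    public int ReadByte()
    {
        int b = main.ReadByte();
        if (b >= 0)
            digest = digest * 31 + (uint)b;  // update digest as bytes flow by
        return b;
    }

    public uint Checksum
    {
        get { return digest; }
    }
}
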
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/CheckSumIndexOutput.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/CheckSumIndexOutput.cs
new file mode 100644 (file)
index 0000000..a2ca99c
--- /dev/null
@@ -0,0 +1,107 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Checksum = SupportClass.Checksum;
+using CRC32 = SupportClass.CRC32;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary>Writes bytes through to a primary IndexOutput, computing
+       /// a checksum as it goes.  Note that you cannot use seek().
+       /// </summary>
+       public class ChecksumIndexOutput:IndexOutput
+       {
+               internal IndexOutput main;
+               internal Checksum digest;
+               
+               public ChecksumIndexOutput(IndexOutput main)
+               {
+                       this.main = main;
+                       digest = new CRC32();
+               }
+               
+               public override void  WriteByte(byte b)
+               {
+                       digest.Update(b);
+                       main.WriteByte(b);
+               }
+               
+               public override void  WriteBytes(byte[] b, int offset, int length)
+               {
+                       digest.Update(b, offset, length);
+                       main.WriteBytes(b, offset, length);
+               }
+               
+               public virtual long GetChecksum()
+               {
+                       return digest.GetValue();
+               }
+               
+               public override void  Flush()
+               {
+                       main.Flush();
+               }
+               
+               public override void  Close()
+               {
+                       main.Close();
+               }
+               
+               public override long GetFilePointer()
+               {
+                       return main.GetFilePointer();
+               }
+               
+               public override void  Seek(long pos)
+               {
+                       throw new System.SystemException("not allowed");
+               }
+               
+               /// <summary> Starts but does not complete the commit of this file (=
+               /// writing of the final checksum at the end).  After this
+               /// is called, you must call {@link #finishCommit} and then
+               /// {@link #close} to complete the commit.
+               /// </summary>
+               public virtual void  PrepareCommit()
+               {
+                       long checksum = GetChecksum();
+                       // Intentionally write a mismatched checksum.  This is
+                       // because we want to 1) test, as best we can, that we
+                       // are able to write a long to the file, but 2) not
+                       // actually "commit" the file yet.  This (prepare
+                       // commit) is phase 1 of a two-phase commit.
+                       long pos = main.GetFilePointer();
+                       main.WriteLong(checksum - 1);
+                       main.Flush();
+                       main.Seek(pos);
+               }
+               
+               /// <summary>See {@link #prepareCommit} </summary>
+               public virtual void  FinishCommit()
+               {
+                       main.WriteLong(GetChecksum());
+               }
+               
+               public override long Length()
+               {
+                       return main.Length();
+               }
+       }
+}
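
The PrepareCommit/FinishCommit dance can be reproduced with a plain FileStream to see why the deliberately wrong checksum matters: phase 1 proves the final long is physically writable (disk full, I/O error) without making the file look committed, and phase 2 overwrites it in place with the real value. All names below are invented for illustration:

using System.IO;

class TwoPhaseCommitDemo
{
    static void Main()
    {
        using (var fs = new FileStream("segments.tmp", FileMode.Create))
        using (var w = new BinaryWriter(fs))
        {
            w.Write(new byte[] { 1, 2, 3 });  // payload
            long checksum = 0xCAFE;           // pretend running checksum

            // Phase 1: write a deliberately mismatched checksum, then
            // remember where it went so it can be overwritten later.
            long pos = fs.Position;
            w.Write(checksum - 1);
            w.Flush();
            fs.Seek(pos, SeekOrigin.Begin);

            // Phase 2: overwrite in place with the real checksum.
            w.Write(checksum);
        }
    }
}
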
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/Directory.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/Directory.cs
new file mode 100644 (file)
index 0000000..480dd42
--- /dev/null
@@ -0,0 +1,282 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexFileNameFilter = Mono.Lucene.Net.Index.IndexFileNameFilter;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary>A Directory is a flat list of files.  Files may be written once, when they
+       /// are created.  Once a file is created it may only be opened for read, or
+       /// deleted.  Random access is permitted both when reading and writing.
+       /// 
+       /// <p/> Java's i/o APIs are not used directly; rather, all i/o
+       /// goes through this API.  This permits things such as: <ul>
+       /// <li> implementation of RAM-based indices;</li>
+       /// <li> implementation of indices stored in a database, via JDBC;</li>
+       /// <li> implementation of an index as a single file;</li>
+       /// </ul>
+       /// 
+       /// Directory locking is implemented by an instance of {@link
+       /// LockFactory}, and can be changed for each Directory
+       /// instance using {@link #setLockFactory}.
+       /// 
+       /// </summary>
+       [Serializable]
+       public abstract class Directory : System.IDisposable
+       {
+               protected internal volatile bool isOpen = true;
+               
+               /// <summary>Holds the LockFactory instance (implements locking for
+               /// this Directory instance). 
+               /// </summary>
+               [NonSerialized]
+               protected internal LockFactory lockFactory;
+               
+               /// <deprecated> For some Directory implementations ({@link
+               /// FSDirectory}, and its subclasses), this method
+               /// silently filters its results to include only index
+               /// files.  Please use {@link #listAll} instead, which
+               /// does no filtering. 
+               /// </deprecated>
+        [Obsolete("For some Directory implementations (FSDirectory, and its subclasses), this method silently filters its results to include only index files.  Please use ListAll instead, which does no filtering. ")]
+               public abstract System.String[] List();
+               
+               /// <summary>Returns an array of strings, one for each file in the
+               /// directory.  Unlike {@link #list} this method does no
+               /// filtering of the contents in a directory, and it will
+               /// never return null (throws IOException instead).
+               /// 
+               /// Currently this method simply falls back to {@link
+               /// #list} for Directory impls outside of Lucene's core &amp;
+               /// contrib, but in 3.0 that method will be removed and
+               /// this method will become abstract. 
+               /// </summary>
+               public virtual System.String[] ListAll()
+               {
+                       return List();
+               }
+               
+               /// <summary>Returns true iff a file with the given name exists. </summary>
+               public abstract bool FileExists(System.String name);
+               
+               /// <summary>Returns the time the named file was last modified. </summary>
+               public abstract long FileModified(System.String name);
+               
+               /// <summary>Set the modified time of an existing file to now. </summary>
+               public abstract void  TouchFile(System.String name);
+               
+               /// <summary>Removes an existing file in the directory. </summary>
+               public abstract void  DeleteFile(System.String name);
+               
+               /// <summary>Renames an existing file in the directory.
+               /// If a file already exists with the new name, then it is replaced.
+               /// This replacement is not guaranteed to be atomic.
+               /// </summary>
+               /// <deprecated> 
+               /// </deprecated>
+        [Obsolete]
+               public abstract void  RenameFile(System.String from, System.String to);
+               
+               /// <summary>Returns the length of a file in the directory. </summary>
+               public abstract long FileLength(System.String name);
+               
+               
+               /// <summary>Creates a new, empty file in the directory with the given name.
+               /// Returns a stream writing this file. 
+               /// </summary>
+               public abstract IndexOutput CreateOutput(System.String name);
+               
+               /// <summary>Ensure that any writes to this file are moved to
+               /// stable storage.  Lucene uses this to properly commit
+               /// changes to the index, to prevent a machine/OS crash
+               /// from corrupting the index. 
+               /// </summary>
+               public virtual void  Sync(System.String name)
+               {
+               }
+               
+               /// <summary>Returns a stream reading an existing file. </summary>
+               public abstract IndexInput OpenInput(System.String name);
+               
+               /// <summary>Returns a stream reading an existing file, with the
+               /// specified read buffer size.  The particular Directory
+               /// implementation may ignore the buffer size.  Currently
+               /// the only Directory implementations that respect this
+               /// parameter are {@link FSDirectory} and {@link
+               /// Mono.Lucene.Net.Index.CompoundFileReader}.
+               /// </summary>
+               public virtual IndexInput OpenInput(System.String name, int bufferSize)
+               {
+                       return OpenInput(name);
+               }
+               
+               /// <summary>Construct a {@link Lock}.</summary>
+               /// <param name="name">the name of the lock file
+               /// </param>
+               public virtual Lock MakeLock(System.String name)
+               {
+                       return lockFactory.MakeLock(name);
+               }
+               /// <summary> Attempt to clear (forcefully unlock and remove) the
+               /// specified lock.  Only call this at a time when you are
+               /// certain this lock is no longer in use.
+               /// </summary>
+               /// <param name="name">name of the lock to be cleared.
+               /// </param>
+               public virtual void  ClearLock(System.String name)
+               {
+                       if (lockFactory != null)
+                       {
+                               lockFactory.ClearLock(name);
+                       }
+               }
+               
+               /// <summary>Closes the store. </summary>
+               public abstract void  Close();
+
+        public abstract void Dispose();
+               
+               /// <summary> Set the LockFactory that this Directory instance should
+               /// use for its locking implementation.  Each instance of
+               /// LockFactory should only be used for one directory (i.e.,
+               /// do not share a single instance across multiple
+               /// Directories).
+               /// 
+               /// </summary>
+               /// <param name="lockFactory">instance of {@link LockFactory}.
+               /// </param>
+               public virtual void  SetLockFactory(LockFactory lockFactory)
+               {
+                       this.lockFactory = lockFactory;
+                       lockFactory.SetLockPrefix(this.GetLockID());
+               }
+               
+               /// <summary> Get the LockFactory that this Directory instance is
+               /// using for its locking implementation.  Note that this
+               /// may be null for Directory implementations that provide
+               /// their own locking implementation.
+               /// </summary>
+               public virtual LockFactory GetLockFactory()
+               {
+                       return this.lockFactory;
+               }
+               
+               /// <summary> Return a string identifier that uniquely differentiates
+               /// this Directory instance from other Directory instances.
+               /// This ID should be the same if two Directory instances
+               /// (even in different JVMs and/or on different machines)
+               /// are considered "the same index".  This is how locking
+               /// "scopes" to the right index.
+               /// </summary>
+               public virtual System.String GetLockID()
+               {
+                       return this.ToString();
+               }
+
+        public override string ToString()
+        {
+            return base.ToString() + " lockFactory=" + GetLockFactory();
+        }
+               
+               /// <summary> Copy contents of a directory src to a directory dest.
+               /// If a file in src already exists in dest then the
+               /// one in dest will be blindly overwritten.
+               /// 
+               /// <p/><b>NOTE:</b> the source directory cannot change
+               /// while this method is running.  Otherwise the results
+               /// are undefined and you could easily hit a
+               /// FileNotFoundException.
+               /// 
+               /// <p/><b>NOTE:</b> this method only copies files that look
+               /// like index files (ie, have extensions matching the
+               /// known extensions of index files).
+               /// 
+               /// </summary>
+               /// <param name="src">source directory
+               /// </param>
+               /// <param name="dest">destination directory
+               /// </param>
+               /// <param name="closeDirSrc">if <code>true</code>, call {@link #Close()} method on source directory
+               /// </param>
+               /// <throws>  IOException </throws>
+               public static void  Copy(Directory src, Directory dest, bool closeDirSrc)
+               {
+                       System.String[] files = src.ListAll();
+                       
+                       IndexFileNameFilter filter = IndexFileNameFilter.GetFilter();
+                       
+                       byte[] buf = new byte[BufferedIndexOutput.BUFFER_SIZE];
+                       for (int i = 0; i < files.Length; i++)
+                       {
+                               
+                               if (!filter.Accept(null, files[i]))
+                                       continue;
+                               
+                               IndexOutput os = null;
+                               IndexInput is_Renamed = null;
+                               try
+                               {
+                                       // create file in dest directory
+                                       os = dest.CreateOutput(files[i]);
+                                       // read current file
+                                       is_Renamed = src.OpenInput(files[i]);
+                                       // and copy to dest directory
+                                       long len = is_Renamed.Length();
+                                       long readCount = 0;
+                                       while (readCount < len)
+                                       {
+                                               int toRead = readCount + BufferedIndexOutput.BUFFER_SIZE > len ? (int) (len - readCount) : BufferedIndexOutput.BUFFER_SIZE;
+                                               is_Renamed.ReadBytes(buf, 0, toRead);
+                                               os.WriteBytes(buf, toRead);
+                                               readCount += toRead;
+                                       }
+                               }
+                               finally
+                               {
+                                       // graceful cleanup
+                                       try
+                                       {
+                                               if (os != null)
+                                                       os.Close();
+                                       }
+                                       finally
+                                       {
+                                               if (is_Renamed != null)
+                                                       is_Renamed.Close();
+                                       }
+                               }
+                       }
+                       if (closeDirSrc)
+                               src.Close();
+               }
+               
+               /// <throws>  AlreadyClosedException if this Directory is closed </throws>
+               public /*protected internal*/ void  EnsureOpen()
+               {
+                       if (!isOpen)
+                               throw new AlreadyClosedException("this Directory is closed");
+               }
+
+        public bool isOpen_ForNUnit
+        {
+            get { return isOpen; }
+        }
+       }
+}
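
As a usage sketch of the static Copy method above: it snapshots every index-looking file from one directory into another, blindly overwriting same-named files in the destination. Hedged example only — RAMDirectory appears elsewhere in this commit and is assumed here to have a parameterless constructor:

using Mono.Lucene.Net.Store;

class CopyExample
{
    static void Main()
    {
        Directory src = new RAMDirectory();   // populated by an IndexWriter in real use
        Directory dest = new RAMDirectory();

        // src must not change while the copy runs, or the results
        // are undefined (see the NOTE in the doc comment above).
        Directory.Copy(src, dest, closeDirSrc: false);

        dest.Close();
        src.Close();
    }
}
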
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/FSDirectory.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/FSDirectory.cs
new file mode 100644 (file)
index 0000000..73f0131
--- /dev/null
@@ -0,0 +1,1111 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using System.Collections.Generic;
+
+// Used only for WRITE_LOCK_NAME in deprecated create=true case:
+using IndexFileNameFilter = Mono.Lucene.Net.Index.IndexFileNameFilter;
+using IndexWriter = Mono.Lucene.Net.Index.IndexWriter;
+using Constants = Mono.Lucene.Net.Util.Constants;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> <a name="subclasses"/>
+       /// Base class for Directory implementations that store index
+       /// files in the file system.  There are currently three core
+       /// subclasses:
+       /// 
+       /// <ul>
+       /// 
+       /// <li> {@link SimpleFSDirectory} is a straightforward
+       /// implementation using java.io.RandomAccessFile.
+       /// However, it has poor concurrent performance
+       /// (multiple threads will bottleneck) as it
+       /// synchronizes when multiple threads read from the
+       /// same file.</li>
+       /// 
+       /// <li> {@link NIOFSDirectory} uses java.nio's
+       /// FileChannel's positional io when reading to avoid
+       /// synchronization when reading from the same file.
+       /// Unfortunately, due to a Windows-only <a
+       /// href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6265734">Sun
+       /// JRE bug</a> this is a poor choice for Windows, but
+       /// on all other platforms this is the preferred
+       /// choice. Applications using {@link Thread#interrupt()} or
+    /// <code>Future#cancel(boolean)</code> (on Java 1.5) should use
+    /// {@link SimpleFSDirectory} instead. See {@link NIOFSDirectory} java doc
+    /// for details.</li>
+       /// 
+       /// <li> {@link MMapDirectory} uses memory-mapped IO when
+       /// reading. This is a good choice if you have plenty
+       /// of virtual memory relative to your index size, eg
+       /// if you are running on a 64 bit JRE, or you are
+       /// running on a 32 bit JRE but your index sizes are
+       /// small enough to fit into the virtual memory space.
+       /// Java has currently the limitation of not being able to
+       /// unmap files from user code. The files are unmapped, when GC
+       /// releases the byte buffers. Due to
+       /// <a href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4724038">
+       /// this bug</a> in Sun's JRE, MMapDirectory's {@link IndexInput#close}
+       /// is unable to close the underlying OS file handle. Only when
+       /// GC finally collects the underlying objects, which could be
+       /// quite some time later, will the file handle be closed.
+       /// This will consume additional transient disk usage: on Windows,
+       /// attempts to delete or overwrite the files will result in an
+       /// exception; on other platforms, which typically have a &quot;delete on
+       /// last close&quot; semantics, while such operations will succeed, the bytes
+       /// are still consuming space on disk.  For many applications this
+       /// limitation is not a problem (e.g. if you have plenty of disk space,
+       /// and you don't rely on overwriting files on Windows) but it's still
+       /// an important limitation to be aware of. This class supplies a
+       /// (possibly dangerous) workaround mentioned in the bug report,
+       /// which may fail on non-Sun JVMs.
+    ///
+    /// Applications using {@link Thread#interrupt()} or
+    /// <code>Future#cancel(boolean)</code> (on Java 1.5) should use
+    /// {@link SimpleFSDirectory} instead. See {@link MMapDirectory}
+    /// java doc for details.</li>
+       /// </ul>
+       /// 
+       /// Unfortunately, because of system peculiarities, there is
+       /// no single overall best implementation.  Therefore, we've
+       /// added the {@link #open} method, to allow Lucene to choose
+       /// the best FSDirectory implementation given your
+       /// environment, and the known limitations of each
+       /// implementation.  For users who have no reason to prefer a
+       /// specific implementation, it's best to simply use {@link
+       /// #open}.  For all others, you should instantiate the
+       /// desired implementation directly.
+       /// 
+       /// <p/>The locking implementation is by default {@link
+       /// NativeFSLockFactory}, but can be changed by
+       /// passing in a custom {@link LockFactory} instance.
+       /// The deprecated <code>getDirectory</code> methods default to use
+       /// {@link SimpleFSLockFactory} for backwards compatibility.
+       /// The system properties 
+       /// <code>org.apache.lucene.store.FSDirectoryLockFactoryClass</code>
+       /// and <code>org.apache.lucene.FSDirectory.class</code>
+       /// are deprecated and only used by the deprecated
+       /// <code>getDirectory</code> methods. The system property
+       /// <code>org.apache.lucene.lockDir</code> is ignored completely.
+       /// If you really want to store locks
+       /// elsewhere, you can create your own {@link
+       /// SimpleFSLockFactory} (or {@link NativeFSLockFactory},
+       /// etc.) passing in your preferred lock directory.
+       /// 
+       /// <p/><em>In 3.0 this class will become abstract.</em>
+       /// 
+       /// </summary>
+       /// <seealso cref="Directory">
+       /// </seealso>
+       // TODO: in 3.0 this will become an abstract base class
+       public class FSDirectory:Directory
+       {
+               
+               /// <summary>This cache of directories ensures that there is a unique Directory
+               /// instance per path, so that synchronization on the Directory can be used to
+               /// synchronize access between readers and writers.  We use
+               /// refcounts to ensure when the last use of an FSDirectory
+               /// instance for a given canonical path is closed, we remove the
+               /// instance from the cache.  See LUCENE-776
+               /// for some relevant discussion.
+               /// </summary>
+               /// <deprecated> Not used by any non-deprecated methods anymore
+               /// </deprecated>
+        [Obsolete("Not used by any non-deprecated methods anymore")]
+        private static readonly Dictionary<string, FSDirectory> DIRECTORIES = new Dictionary<string, FSDirectory>();
+               
+               private static bool disableLocks = false;
+               
+               // TODO: should this move up to the Directory base class?  Also: should we
+               // make a per-instance (in addition to the static "default") version?
+               
+               /// <summary> Set whether Lucene's use of lock files is disabled. By default, 
+               /// lock files are enabled. They should only be disabled if the index
+               /// is on a read-only medium like a CD-ROM.
+               /// </summary>
+               /// <deprecated> Use a {@link #open(File, LockFactory)} or a constructor
+               /// that takes a {@link LockFactory} and supply
+               /// {@link NoLockFactory#getNoLockFactory}. This setting does not work
+               /// with {@link #open(File)}; only the deprecated <code>getDirectory</code>
+               /// methods respect this setting.
+               /// </deprecated>
+        [Obsolete("Use Open(File, LockFactory) or a constructor that takes a LockFactory and supply NoLockFactory.GetNoLockFactory. This setting does not work with Open(File); only the deprecated GetDirectory methods respect this setting.")]
+               public static void  SetDisableLocks(bool doDisableLocks)
+               {
+                       FSDirectory.disableLocks = doDisableLocks;
+               }
+               
+               /// <summary> Returns whether Lucene's use of lock files is disabled.</summary>
+               /// <returns> true if locks are disabled, false if locks are enabled.
+               /// </returns>
+               /// <seealso cref="SetDisableLocks">
+               /// </seealso>
+               /// <deprecated> Use a constructor that takes a {@link LockFactory} and
+               /// supply {@link NoLockFactory#getNoLockFactory}.
+               /// </deprecated>
+        [Obsolete("Use a constructor that takes a LockFactory and supply NoLockFactory.GetNoLockFactory.")]
+               public static bool GetDisableLocks()
+               {
+                       return FSDirectory.disableLocks;
+               }
+               
+               /// <summary> Directory specified by <code>org.apache.lucene.lockDir</code>
+               /// or <code>java.io.tmpdir</code> system property.
+               /// </summary>
+               /// <deprecated> As of 2.1, <code>LOCK_DIR</code> is unused
+               /// because the write.lock is now stored by default in the
+               /// index directory.  If you really want to store locks
+               /// elsewhere, you can create your own {@link
+               /// SimpleFSLockFactory} (or {@link NativeFSLockFactory},
+               /// etc.) passing in your preferred lock directory.  Then,
+               /// pass this <code>LockFactory</code> instance to one of
+               /// the <code>open</code> methods that take a
+               /// <code>lockFactory</code> (for example, {@link #open(File, LockFactory)}).
+               /// </deprecated>
+        //[Obsolete("As of 2.1, LOCK_DIR is unused because the write.lock is now stored by default in the index directory. ")]
+               //public static readonly System.String LOCK_DIR = SupportClass.AppSettings.Get("Mono.Lucene.Net.lockDir", System.IO.Path.GetTempPath());
+               
+               /// <summary>The default class which implements filesystem-based directories. </summary>
+               // deprecated
+        [Obsolete]
+        private static readonly System.Type IMPL = typeof(Mono.Lucene.Net.Store.SimpleFSDirectory);
+               
+               private static System.Security.Cryptography.HashAlgorithm DIGESTER;
+               
+               /// <summary>A buffer optionally used by the RenameFile method </summary>
+               private byte[] buffer = null;
+               
+               
+               /// <summary>Returns the directory instance for the named location.
+               /// 
+               /// </summary>
+               /// <deprecated> Use {@link #Open(File)}
+               /// 
+               /// </deprecated>
+               /// <param name="path">the path to the directory.
+               /// </param>
+               /// <returns> the FSDirectory for the named file.  
+               /// </returns>
+        [Obsolete("Use Open(File)")]
+               public static FSDirectory GetDirectory(System.String path)
+               {
+                       return GetDirectory(new System.IO.DirectoryInfo(path), null);
+               }
+               
+               /// <summary>Returns the directory instance for the named location.
+               /// 
+               /// </summary>
+               /// <deprecated> Use {@link #Open(File, LockFactory)}
+               /// 
+               /// </deprecated>
+               /// <param name="path">the path to the directory.
+               /// </param>
+               /// <param name="lockFactory">instance of {@link LockFactory} providing the
+               /// locking implementation.
+               /// </param>
+               /// <returns> the FSDirectory for the named file.  
+               /// </returns>
+        [Obsolete("Use Open(File, LockFactory)")]
+               public static FSDirectory GetDirectory(System.String path, LockFactory lockFactory)
+               {
+                       return GetDirectory(new System.IO.DirectoryInfo(path), lockFactory);
+               }
+               
+               /// <summary>Returns the directory instance for the named location.
+               /// 
+               /// </summary>
+               /// <deprecated> Use {@link #Open(File)}
+               /// 
+               /// </deprecated>
+               /// <param name="file">the path to the directory.
+               /// </param>
+               /// <returns> the FSDirectory for the named file.  
+               /// </returns>
+        [Obsolete("Use Open(File)")]
+               public static FSDirectory GetDirectory(System.IO.DirectoryInfo file)
+               {
+                       return GetDirectory(file, null);
+               }
+
+        /// <summary>Returns the directory instance for the named location.
+        /// 
+        /// </summary>
+        /// <deprecated> Use {@link #Open(File)}
+        /// 
+        /// </deprecated>
+        /// <param name="file">the path to the directory.
+        /// </param>
+        /// <returns> the FSDirectory for the named file.  
+        /// </returns>
+        [System.Obsolete("Use the constructor that takes a DirectoryInfo, this will be removed in the 3.0 release")]
+        public static FSDirectory GetDirectory(System.IO.FileInfo file)
+        {
+            return GetDirectory(new System.IO.DirectoryInfo(file.FullName), null);
+        }
+               
+               /// <summary>Returns the directory instance for the named location.
+               /// 
+               /// </summary>
+               /// <deprecated> Use {@link #Open(File, LockFactory)}
+               /// 
+               /// </deprecated>
+               /// <param name="file">the path to the directory.
+               /// </param>
+               /// <param name="lockFactory">instance of {@link LockFactory} providing the
+               /// locking implementation.
+               /// </param>
+               /// <returns> the FSDirectory for the named file.  
+               /// </returns>
+               [System.Obsolete("Use the constructor that takes a DirectoryInfo, this will be removed in the 3.0 release")]
+               public static FSDirectory GetDirectory(System.IO.FileInfo file, LockFactory lockFactory)
+               {
+            return GetDirectory(new System.IO.DirectoryInfo(file.FullName), lockFactory);
+               }
+
+        /// <summary>Returns the directory instance for the named location.
+        /// 
+        /// </summary>
+        /// <deprecated> Use {@link #Open(File, LockFactory)}
+        /// 
+        /// </deprecated>
+        /// <param name="file">the path to the directory.
+        /// </param>
+        /// <param name="lockFactory">instance of {@link LockFactory} providing the
+        /// locking implementation.
+        /// </param>
+        /// <returns> the FSDirectory for the named file.  
+        /// </returns>
+        [Obsolete("Use Open(File, LockFactory)")]
+        public static FSDirectory GetDirectory(System.IO.DirectoryInfo file, LockFactory lockFactory)
+        {
+            FSDirectory dir;
+            lock (DIRECTORIES)
+            {
+                if(!DIRECTORIES.TryGetValue(file.FullName, out dir))
+                {
+                    try
+                    {
+                        dir = (FSDirectory)System.Activator.CreateInstance(IMPL, true);
+                    }
+                    catch (System.Exception e)
+                    {
+                        throw new System.SystemException("cannot load FSDirectory class: " + e.ToString(), e);
+                    }
+                    dir.Init(file, lockFactory);
+                    DIRECTORIES.Add(file.FullName, dir);
+                }
+                else
+                {
+                    // Catch the case where a Directory is pulled from the cache, but has a
+                    // different LockFactory instance.
+                    if (lockFactory != null && lockFactory != dir.GetLockFactory())
+                    {
+                        throw new System.IO.IOException("Directory was previously created with a different LockFactory instance; please pass null as the lockFactory instance and use setLockFactory to change it");
+                    }
+                    dir.checked_Renamed = false;
+                }
+            }
+            lock (dir)
+            {
+                dir.refCount++;
+            }
+            return dir;
+        }
+               
+               
+               /// <summary>Returns the directory instance for the named location.
+               /// 
+               /// </summary>
+               /// <deprecated> Use IndexWriter's create flag, instead, to
+               /// create a new index.
+               /// 
+               /// </deprecated>
+               /// <param name="path">the path to the directory.
+               /// </param>
+               /// <param name="create">if true, create, or erase any existing contents.
+               /// </param>
+               /// <returns> the FSDirectory for the named file.  
+               /// </returns>
+        [Obsolete("Use IndexWriter's create flag, instead, to create a new index.")]
+               public static FSDirectory GetDirectory(System.String path, bool create)
+               {
+                       return GetDirectory(new System.IO.DirectoryInfo(path), create);
+               }
+               
+               /// <summary>Returns the directory instance for the named location.
+               /// 
+               /// </summary>
+               /// <deprecated> Use IndexWriter's create flag, instead, to
+               /// create a new index.
+               /// 
+               /// </deprecated>
+               /// <param name="file">the path to the directory.
+               /// </param>
+               /// <param name="create">if true, create, or erase any existing contents.
+               /// </param>
+               /// <returns> the FSDirectory for the named file.  
+               /// </returns>
+               [System.Obsolete("Use the method that takes a DirectoryInfo, this will be removed in the 3.0 release")]
+               public static FSDirectory GetDirectory(System.IO.FileInfo file, bool create)
+               {
+                       return GetDirectory(new System.IO.DirectoryInfo(file.FullName), create);
+               }
+
+        /// <summary>Returns the directory instance for the named location.
+        /// 
+        /// </summary>
+        /// <deprecated> Use IndexWriter's create flag, instead, to
+        /// create a new index.
+        /// 
+        /// </deprecated>
+        /// <param name="file">the path to the directory.
+        /// </param>
+        /// <param name="create">if true, create, or erase any existing contents.
+        /// </param>
+        /// <returns> the FSDirectory for the named file.  
+        /// </returns>
+        [Obsolete("Use IndexWriter's create flag, instead, to create a new index.")]
+        public static FSDirectory GetDirectory(System.IO.DirectoryInfo file, bool create)
+        {
+            FSDirectory dir = GetDirectory(file, null);
+
+            // This is now deprecated (creation should only be done
+            // by IndexWriter):
+            if (create)
+            {
+                dir.Create();
+            }
+
+            return dir;
+        }
+               
+               /// <deprecated> 
+               /// </deprecated>
+        [Obsolete]
+               private void  Create()
+               {
+                       if (directory.Exists)
+                       {
+                               System.String[] files = SupportClass.FileSupport.GetLuceneIndexFiles(directory.FullName, IndexFileNameFilter.GetFilter()); // clear old files
+                               if (files == null)
+                                       throw new System.IO.IOException("cannot read directory " + directory.FullName + ": list() returned null");
+                               for (int i = 0; i < files.Length; i++)
+                               {
+                    System.String fileOrDir = System.IO.Path.Combine(directory.FullName, files[i]);
+                    if (System.IO.File.Exists(fileOrDir))
+                                       {
+                        System.IO.File.Delete(fileOrDir);
+                                       }
+                    else if (System.IO.Directory.Exists(fileOrDir))
+                                       {
+                        System.IO.Directory.Delete(fileOrDir);
+                                       }
+                    // no need to throw anything - if a delete fails the exc will propagate to the caller
+                               }
+                       }
+                       lockFactory.ClearLock(IndexWriter.WRITE_LOCK_NAME);
+               }
+               
+               private bool checked_Renamed;
+               
+               internal void  CreateDir()
+               {
+                       if (!checked_Renamed)
+                       {
+                if (!this.directory.Exists)
+                {
+                    try
+                    {
+                        this.directory.Create();
+                    }
+                    catch (Exception)
+                    {
+                        throw new System.IO.IOException("Cannot create directory: " + directory);
+                    }
+                    this.directory.Refresh(); // need to see the creation
+                }
+                               
+                               checked_Renamed = true;
+                       }
+               }
+               
+               /// <summary>Initializes the directory to create a new file with the given name.
+               /// This method should be used in {@link #createOutput}. 
+               /// </summary>
+               protected internal void  InitOutput(System.String name)
+               {
+                       EnsureOpen();
+                       CreateDir();
+                       System.IO.FileInfo file = new System.IO.FileInfo(System.IO.Path.Combine(directory.FullName, name));
+            if (file.Exists) // delete existing, if any
+            {
+                try
+                {
+                    file.Delete();
+                }
+                catch (Exception)
+                {
+                    throw new System.IO.IOException("Cannot overwrite: " + file);
+                }
+            }
+               }
+               
+               /// <summary>The underlying filesystem directory </summary>
+               protected internal System.IO.DirectoryInfo directory = null;
+               
+               /// <deprecated> 
+               /// </deprecated>
+        [Obsolete]
+               private int refCount = 0;
+               
+               /// <deprecated> 
+               /// </deprecated>
+        [Obsolete]
+               protected internal FSDirectory()
+               {
+                       // permit subclassing
+               }
+               
+               /// <summary>Create a new FSDirectory for the named location (ctor for subclasses).</summary>
+               /// <param name="path">the path of the directory
+               /// </param>
+               /// <param name="lockFactory">the lock factory to use, or null for the default
+               /// ({@link NativeFSLockFactory});
+               /// </param>
+               /// <throws>  IOException </throws>
+               protected internal FSDirectory(System.IO.DirectoryInfo path, LockFactory lockFactory)
+               {
+                       // new ctors always use NativeFSLockFactory as the default:
+                       if (lockFactory == null)
+                       {
+                               lockFactory = new NativeFSLockFactory();
+                       }
+                       Init(path, lockFactory);
+                       refCount = 1;
+               }
+               
+               /// <summary>Creates an FSDirectory instance, trying to pick the
+               /// best implementation given the current environment.
+               /// The directory returned uses the {@link NativeFSLockFactory}.
+               /// 
+               /// <p/>Currently this returns {@link SimpleFSDirectory} as
+               /// NIOFSDirectory is currently not supported.
+               /// 
+               /// <p/><b>NOTE</b>: this method may suddenly change which
+               /// implementation is returned from release to release, in
+               /// the event that higher performance defaults become
+               /// possible; if the precise implementation is important to
+               /// your application, please instantiate it directly,
+               /// instead. On 64 bit systems, it may also be good to
+               /// return {@link MMapDirectory}, but this is disabled
+               /// because of officially missing unmap support in Java.
+               /// For optimal performance you should consider using
+               /// this implementation on 64 bit JVMs.
+               /// 
+               /// <p/>See <a href="#subclasses">above</a> 
+               /// </summary>
+               [System.Obsolete("Use the method that takes a DirectoryInfo, this will be removed in the 3.0 release")]
+               public static FSDirectory Open(System.IO.FileInfo path)
+               {
+                       System.IO.DirectoryInfo dir = new System.IO.DirectoryInfo(path.FullName);
+                       return Open(dir, null);
+               }
+               
+               /// <summary>Creates an FSDirectory instance, trying to pick the
+               /// best implementation given the current environment.
+               /// The directory returned uses the {@link NativeFSLockFactory}.
+               /// 
+               /// <p/>Currently this returns {@link SimpleFSDirectory} as
+               /// NIOFSDirectory is currently not supported.
+               /// 
+               /// <p/><b>NOTE</b>: this method may suddenly change which
+               /// implementation is returned from release to release, in
+               /// the event that higher performance defaults become
+               /// possible; if the precise implementation is important to
+               /// your application, please instantiate it directly,
+               /// instead. On 64 bit systems, it may also be good to
+               /// return {@link MMapDirectory}, but this is disabled
+               /// because of officially missing unmap support in Java.
+               /// For optimal performance you should consider using
+               /// this implementation on 64 bit JVMs.
+               /// 
+               /// <p/>See <a href="#subclasses">above</a> 
+               /// </summary>
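+               /// <example>
+               /// A minimal usage sketch (the path is illustrative, not part of this patch):
+               /// <code>
+               /// FSDirectory dir = FSDirectory.Open(new System.IO.DirectoryInfo("/tmp/index"));
+               /// // ... use dir with an IndexWriter or IndexReader, then:
+               /// dir.Close();
+               /// </code>
+               /// </example>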
+               public static FSDirectory Open(System.IO.DirectoryInfo path)
+               {
+                       return Open(path, null);
+               }
+               
+               /// <summary>Just like {@link #Open(File)}, but allows you to
+               /// also specify a custom {@link LockFactory}. 
+               /// </summary>
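+               /// <example>
+               /// A sketch of the non-deprecated replacement for SetDisableLocks(true),
+               /// e.g. for an index on a read-only medium (path is assumed to be a
+               /// System.IO.DirectoryInfo):
+               /// <code>
+               /// FSDirectory dir = FSDirectory.Open(path, NoLockFactory.GetNoLockFactory());
+               /// </code>
+               /// </example>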
+               public static FSDirectory Open(System.IO.DirectoryInfo path, LockFactory lockFactory)
+               {
+                       /* For testing:
+                       MMapDirectory dir=new MMapDirectory(path, lockFactory);
+                       dir.setUseUnmap(true);
+                       return dir;
+                       */
+                       
+                       if (Constants.WINDOWS)
+                       {
+                               return new SimpleFSDirectory(path, lockFactory);
+                       }
+                       else
+                       {
+                //NIOFSDirectory is not implemented in Mono.Lucene.Net
+                               //return new NIOFSDirectory(path, lockFactory);
+                return new SimpleFSDirectory(path, lockFactory);
+                       }
+        }
+               
+               /* will move to ctor, when reflection is removed in 3.0 */
+               private void  Init(System.IO.DirectoryInfo path, LockFactory lockFactory)
+               {
+                       
+                       // Set up lockFactory with cascaded defaults: if an instance was passed in,
+                       // use that; else if locks are disabled, use NoLockFactory; else if the
+                       // system property Mono.Lucene.Net.Store.FSDirectoryLockFactoryClass is set,
+                       // instantiate that; else, use SimpleFSLockFactory:
+                       
+                       directory = path;
+                       
+            // due to differences in how Java & .NET refer to files, the checks are a bit different
+            if (!directory.Exists && System.IO.File.Exists(directory.FullName))
+            {
+                throw new NoSuchDirectoryException("file '" + directory.FullName + "' exists but is not a directory");
+            }
+                       
+                       if (lockFactory == null)
+                       {
+                               
+                               if (disableLocks)
+                               {
+                                       // Locks are disabled:
+                                       lockFactory = NoLockFactory.GetNoLockFactory();
+                               }
+                               else
+                               {
+                                       System.String lockClassName = SupportClass.AppSettings.Get("Mono.Lucene.Net.Store.FSDirectoryLockFactoryClass", "");
+                                       
+                                       if (lockClassName != null && !lockClassName.Equals(""))
+                                       {
+                                               System.Type c;
+                                               
+                                               try
+                                               {
+                                                       c = System.Type.GetType(lockClassName);
+                                               }
+                                               catch (System.Exception)
+                                               {
+                                                       throw new System.IO.IOException("unable to find LockClass " + lockClassName);
+                                               }
+                                               // Type.GetType returns null rather than throwing when the type cannot be found
+                                               if (c == null)
+                                               {
+                                                       throw new System.IO.IOException("unable to find LockClass " + lockClassName);
+                                               }
+                                               
+                                               try
+                                               {
+                                                       lockFactory = (LockFactory) System.Activator.CreateInstance(c, true);
+                                               }
+                                               catch (System.UnauthorizedAccessException e)
+                                               {
+                                                       throw new System.IO.IOException("IllegalAccessException when instantiating LockClass " + lockClassName);
+                                               }
+                                               catch (System.InvalidCastException e)
+                                               {
+                                                       throw new System.IO.IOException("unable to cast LockClass " + lockClassName + " instance to a LockFactory");
+                                               }
+                                               catch (System.Exception e)
+                                               {
+                                                       throw new System.IO.IOException("InstantiationException when instantiating LockClass " + lockClassName);
+                                               }
+                                       }
+                                       else
+                                       {
+                                               // Our default lock is SimpleFSLockFactory;
+                                               // default lockDir is our index directory:
+                                               lockFactory = new SimpleFSLockFactory();
+                                       }
+                               }
+                       }
+                       
+                       SetLockFactory(lockFactory);
+                       
+                       // for a filesystem-based LockFactory, clear the lockPrefix if the locks
+                       // are placed in the index dir; if no lock dir is set, use this directory
+                       if (lockFactory is FSLockFactory)
+                       {
+                               FSLockFactory lf = (FSLockFactory) lockFactory;
+                               System.IO.DirectoryInfo dir = lf.GetLockDir();
+                               // if the lock factory has no lockDir set, use this directory as lockDir
+                               if (dir == null)
+                               {
+                                       lf.SetLockDir(this.directory);
+                                       lf.SetLockPrefix(null);
+                               }
+                               else if (dir.FullName.Equals(this.directory.FullName))
+                               {
+                                       lf.SetLockPrefix(null);
+                               }
+                       }
+               }
+               
+               /// <summary>Lists all files (not subdirectories) in the
+               /// directory.  This method never returns null (throws
+               /// {@link IOException} instead).
+               /// 
+               /// </summary>
+               /// <throws>  NoSuchDirectoryException if the directory
+               ///   does not exist, or does exist but is not a
+               ///   directory.
+               /// </throws>
+               /// <throws>  IOException if list() returns null  </throws>
+               [System.Obsolete("Use the method that takes a DirectoryInfo, this will be removed in the 3.0 release")]
+               public static System.String[] ListAll(System.IO.FileInfo dir)
+               {
+                       return ListAll(new System.IO.DirectoryInfo(dir.FullName));
+               }
+               
+        /// <summary>Lists all files (not subdirectories) in the
+        /// directory.  This method never returns null (throws
+        /// {@link IOException} instead).
+        /// 
+        /// </summary>
+        /// <throws>  NoSuchDirectoryException if the directory
+        ///   does not exist, or does exist but is not a
+        ///   directory.
+        /// </throws>
+        /// <throws>  IOException if list() returns null  </throws>
+        public static System.String[] ListAll(System.IO.DirectoryInfo dir)
+        {
+            if (!dir.Exists)
+            {
+                throw new NoSuchDirectoryException("directory '" + dir.FullName + "' does not exist");
+            }
+            // Exclude subdirs, only the file names, not the paths
+            System.IO.FileInfo[] files = dir.GetFiles();
+            System.String[] result = new System.String[files.Length];
+            for (int i = 0; i < files.Length; i++)
+            {
+                result[i] = files[i].Name;
+            }
+
+            // No reason to return null: if the directory cannot be listed, an exception
+            // will already have been thrown by the call to dir.GetFiles() above.
+            // Using LINQ to create the return value array may be a bit more efficient.
+
+            return result;
+        }
+               
+        [Obsolete("Mono.Lucene.Net-2.9.1. This method overrides obsolete member Mono.Lucene.Net.Store.Directory.List()")]
+               public override System.String[] List()
+               {
+                       EnsureOpen();
+                       return SupportClass.FileSupport.GetLuceneIndexFiles(directory.FullName, IndexFileNameFilter.GetFilter());
+               }
+               
+               /// <summary>Lists all files (not subdirectories) in the
+               /// directory.
+               /// </summary>
+               /// <seealso cref="ListAll(File)">
+               /// </seealso>
+               public override System.String[] ListAll()
+               {
+                       EnsureOpen();
+                       return ListAll(directory);
+               }
+               
+               /// <summary>Returns true iff a file with the given name exists. </summary>
+               public override bool FileExists(System.String name)
+               {
+                       EnsureOpen();
+                       System.IO.FileInfo file = new System.IO.FileInfo(System.IO.Path.Combine(directory.FullName, name));
+            return file.Exists;
+               }
+               
+               /// <summary>Returns the time the named file was last modified. </summary>
+               public override long FileModified(System.String name)
+               {
+                       EnsureOpen();
+                       System.IO.FileInfo file = new System.IO.FileInfo(System.IO.Path.Combine(directory.FullName, name));
+            return (long)file.LastWriteTime.ToUniversalTime().Subtract(new DateTime(1970, 1, 1, 0, 0, 0)).TotalMilliseconds; //{{LUCENENET-353}}
+               }
+               
+               /// <summary>Returns the time the named file was last modified. </summary>
+               public static long FileModified(System.IO.FileInfo directory, System.String name)
+               {
+                       System.IO.FileInfo file = new System.IO.FileInfo(System.IO.Path.Combine(directory.FullName, name));
+            return (long)file.LastWriteTime.ToUniversalTime().Subtract(new DateTime(1970, 1, 1, 0, 0, 0)).TotalMilliseconds; //{{LUCENENET-353}}
+               }
+               
+               /// <summary>Set the modified time of an existing file to now. </summary>
+               public override void  TouchFile(System.String name)
+               {
+                       EnsureOpen();
+                       System.IO.FileInfo file = new System.IO.FileInfo(System.IO.Path.Combine(directory.FullName, name));
+                       file.LastWriteTime = System.DateTime.Now;
+               }
+               
+               /// <summary>Returns the length in bytes of a file in the directory. </summary>
+               public override long FileLength(System.String name)
+               {
+                       EnsureOpen();
+                       System.IO.FileInfo file = new System.IO.FileInfo(System.IO.Path.Combine(directory.FullName, name));
+                       return file.Exists ? file.Length : 0;
+               }
+               
+               /// <summary>Removes an existing file in the directory. </summary>
+               public override void  DeleteFile(System.String name)
+               {
+                       EnsureOpen();
+                       System.IO.FileInfo file = new System.IO.FileInfo(System.IO.Path.Combine(directory.FullName, name));
+            try
+            {
+                file.Delete();
+            }
+            catch (Exception)
+            {
+                throw new System.IO.IOException("Cannot delete " + file);
+            }
+               }
+               
+               /// <summary>Renames an existing file in the directory. 
+               /// Warning: This is not atomic.
+               /// </summary>
+               /// <deprecated> 
+               /// </deprecated>
+        [Obsolete]
+               public override void  RenameFile(System.String from, System.String to)
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                System.IO.FileInfo old = new System.IO.FileInfo(System.IO.Path.Combine(directory.FullName, from));
+                System.String nu = System.IO.Path.Combine(directory.FullName, to);
+                try
+                {
+                    old.MoveTo(nu);
+                }
+                catch (System.IO.IOException ioe)
+                {
+                    // report the actual rename target rather than the parent directory
+                    System.IO.IOException newExc = new System.IO.IOException("Cannot rename " + old + " to " + nu, ioe);
+                    throw newExc;
+                }
+                       }
+               }
+               
+               /// <summary>Creates an IndexOutput for the file with the given name.
+               /// <em>In 3.0 this method will become abstract.</em> 
+               /// </summary>
+               public override IndexOutput CreateOutput(System.String name)
+               {
+                       InitOutput(name);
+                       return new FSIndexOutput(new System.IO.FileInfo(System.IO.Path.Combine(directory.FullName, name)));
+               }
+               
+               public override void  Sync(System.String name)
+               {
+                       EnsureOpen();
+                       System.IO.FileInfo fullFile = new System.IO.FileInfo(System.IO.Path.Combine(directory.FullName, name));
+                       bool success = false;
+                       int retryCount = 0;
+                       System.IO.IOException exc = null;
+                       while (!success && retryCount < 5)
+                       {
+                               retryCount++;
+                               System.IO.FileStream file = null;
+                               try
+                               {
+                                       try
+                                       {
+                        file = new System.IO.FileStream(fullFile.FullName, System.IO.FileMode.OpenOrCreate, System.IO.FileAccess.Write, System.IO.FileShare.ReadWrite);
+                        SupportClass.FileSupport.Sync(file);
+                        success = true;
+                                       }
+                                       finally
+                                       {
+                                               if (file != null)
+                                                       file.Close();
+                                       }
+                               }
+                               catch (System.IO.IOException ioe)
+                               {
+                                       if (exc == null)
+                                               exc = ioe;
+                                       try
+                                       {
+                                               // Pause 5 msec
+                                               System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 5));
+                                       }
+                                       catch (System.Threading.ThreadInterruptedException ie)
+                                       {
+                                               // In 3.0 we will change this to throw
+                                               // InterruptedException instead
+                                               SupportClass.ThreadClass.Current().Interrupt();
+                        throw new System.SystemException(ie.ToString(), ie);
+                                       }
+                               }
+                       }
+                       if (!success)
+                               throw exc; // throw the original exception
+               }
+               
+               // Inherit javadoc
+               public override IndexInput OpenInput(System.String name)
+               {
+                       EnsureOpen();
+                       return OpenInput(name, BufferedIndexInput.BUFFER_SIZE);
+               }
+               
+               /// <summary>Creates an IndexInput for the file with the given name.
+               /// <em>In 3.0 this method will become abstract.</em> 
+               /// </summary>
+               public override IndexInput OpenInput(System.String name, int bufferSize)
+               {
+                       EnsureOpen();
+                       return new FSIndexInput(new System.IO.FileInfo(System.IO.Path.Combine(directory.FullName, name)), bufferSize);
+               }
+               
+               /// <summary> So we can do some byte-to-hexchar conversion below</summary>
+               private static readonly char[] HEX_DIGITS = new char[]{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
+               
+               
+               public override System.String GetLockID()
+               {
+                       EnsureOpen();
+                       System.String dirName; // name to be hashed
+                       try
+                       {
+                               dirName = directory.FullName;
+                       }
+                       catch (System.IO.IOException e)
+                       {
+                               throw new System.SystemException(e.ToString(), e);
+                       }
+                       
+                       byte[] digest;
+                       lock (DIGESTER)
+                       {
+                               digest = DIGESTER.ComputeHash(System.Text.Encoding.UTF8.GetBytes(dirName));
+                       }
+                       System.Text.StringBuilder buf = new System.Text.StringBuilder();
+                       buf.Append("lucene-");
+                       for (int i = 0; i < digest.Length; i++)
+                       {
+                               int b = digest[i];
+                               buf.Append(HEX_DIGITS[(b >> 4) & 0xf]);
+                               buf.Append(HEX_DIGITS[b & 0xf]);
+                       }
+                       
+                       return buf.ToString();
+               }
+               
+               /// <summary>Closes the store to future operations. </summary>
+               public override void  Close()
+               {
+                       lock (this)
+                       {
+                               if (isOpen && --refCount <= 0)
+                               {
+                                       isOpen = false;
+                                       lock (DIRECTORIES)
+                                       {
+                                               DIRECTORIES.Remove(directory.FullName);
+                                       }
+                               }
+                       }
+               }
+
+        /// <summary>
+        /// .NET-specific: disposes this directory by delegating to {@link #Close}.
+        /// </summary>
+        public override void Dispose()
+        {
+            Close();
+        }
+
+        [System.Obsolete("A DirectoryInfo is more appropriate, however this is here for backwards compatibility. This will be removed in the 3.0 release")]
+               public virtual System.IO.FileInfo GetFile()
+               {
+                       EnsureOpen();
+                       return new System.IO.FileInfo(directory.FullName);
+               }
+
+
+        // Java Lucene implements GetFile() which returns a FileInfo.
+        // For Mono.Lucene.Net, GetDirectory() is more appropriate
+        public virtual System.IO.DirectoryInfo GetDirectory()
+        {
+            EnsureOpen();
+            return directory;
+        }
+               
+               /// <summary>For debug output. </summary>
+               public override System.String ToString()
+               {
+            return this.GetType().FullName + "@" + directory + " lockFactory=" + GetLockFactory();
+               }
+               
+               /// <summary> Default read chunk size.  This is a conditional
+               /// default: on 32bit JVMs, it defaults to 100 MB.  On
+               /// 64bit JVMs, it's <code>Integer.MAX_VALUE</code>.
+               /// </summary>
+               /// <seealso cref="setReadChunkSize">
+               /// </seealso>
+               public static readonly int DEFAULT_READ_CHUNK_SIZE;
+               
+               // LUCENE-1566
+               private int chunkSize = DEFAULT_READ_CHUNK_SIZE;
+               
+               /// <summary> Sets the maximum number of bytes read at once from the
+               /// underlying file during {@link IndexInput#readBytes}.
+               /// The default value is {@link #DEFAULT_READ_CHUNK_SIZE};
+               /// 
+               /// <p/> This was introduced due to <a
+               /// href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6478546">Sun
+               /// JVM Bug 6478546</a>, which throws an incorrect
+               /// OutOfMemoryError when attempting to read too many bytes
+               /// at once.  It only happens on 32bit JVMs with a large
+               /// maximum heap size.<p/>
+               /// 
+               /// <p/>Changes to this value will not impact any
+               /// already-opened {@link IndexInput}s.  You should call
+               /// this before attempting to open an index on the
+               /// directory.<p/>
+               /// 
+               /// <p/> <b>NOTE</b>: This value should be as large as
+               /// possible to reduce any possible performance impact.  If
+               /// you still encounter an incorrect OutOfMemoryError,
+               /// try lowering the chunk size.<p/>
+               /// </summary>
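+               /// <example>
+               /// A sketch, assuming a 32-bit runtime where the 100 MB default is too large
+               /// (indexDir is a System.IO.DirectoryInfo, not part of this patch):
+               /// <code>
+               /// FSDirectory dir = FSDirectory.Open(indexDir);
+               /// dir.SetReadChunkSize(10 * 1024 * 1024); // call before opening any IndexInput
+               /// </code>
+               /// </example>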
+               public void  SetReadChunkSize(int chunkSize)
+               {
+                       // LUCENE-1566
+                       if (chunkSize <= 0)
+                       {
+                               throw new System.ArgumentException("chunkSize must be positive");
+                       }
+                       if (!Constants.JRE_IS_64BIT)
+                       {
+                               this.chunkSize = chunkSize;
+                       }
+               }
+               
+               /// <summary> The maximum number of bytes to read at once from the
+               /// underlying file during {@link IndexInput#readBytes}.
+               /// </summary>
+               /// <seealso cref="setReadChunkSize">
+               /// </seealso>
+               public int GetReadChunkSize()
+               {
+                       // LUCENE-1566
+                       return chunkSize;
+               }
+               
+               
+               /// <deprecated> Use SimpleFSDirectory.SimpleFSIndexInput instead 
+               /// </deprecated>
+        [Obsolete("Use SimpleFSDirectory.SimpleFSIndexInput instead ")]
+               public /*protected internal*/ class FSIndexInput:SimpleFSDirectory.SimpleFSIndexInput
+               {
+                       
+                       /// <deprecated> 
+                       /// </deprecated>
+            [Obsolete]
+                       new protected internal class Descriptor:SimpleFSDirectory.SimpleFSIndexInput.Descriptor
+                       {
+                               /// <deprecated> 
+                               /// </deprecated>
+                [Obsolete]
+                               public Descriptor(/*FSIndexInput enclosingInstance,*/ System.IO.FileInfo file, System.IO.FileAccess mode) : base(file, mode)
+                               {
+                               }
+                       }
+                       
+                       /// <deprecated> 
+                       /// </deprecated>
+            [Obsolete]
+                       public FSIndexInput(System.IO.FileInfo path):base(path)
+                       {
+                       }
+                       
+                       /// <deprecated> 
+                       /// </deprecated>
+            [Obsolete]
+                       public FSIndexInput(System.IO.FileInfo path, int bufferSize):base(path, bufferSize)
+                       {
+                       }
+               }
+               
+               /// <deprecated> Use SimpleFSDirectory.SimpleFSIndexOutput instead 
+               /// </deprecated>
+        [Obsolete("Use SimpleFSDirectory.SimpleFSIndexOutput instead ")]
+               protected internal class FSIndexOutput:SimpleFSDirectory.SimpleFSIndexOutput
+               {
+                       
+                       /// <deprecated> 
+                       /// </deprecated>
+            [Obsolete]
+                       public FSIndexOutput(System.IO.FileInfo path):base(path)
+                       {
+                       }
+               }
+               static FSDirectory()
+               {
+                       {
+                               try
+                               {
+                                       System.String name = SupportClass.AppSettings.Get("Mono.Lucene.Net.FSDirectory.class", typeof(SimpleFSDirectory).FullName);
+                                       if (typeof(FSDirectory).FullName.Equals(name))
+                                       {
+                                               // FSDirectory will be abstract, so we replace it by the correct class
+                                               IMPL = typeof(SimpleFSDirectory);
+                                       }
+                                       else
+                                       {
+                                               IMPL = System.Type.GetType(name);
+                                       }
+                               }
+                               catch (System.Security.SecurityException se)
+                               {
+                                       IMPL = typeof(SimpleFSDirectory);
+                               }
+                               catch (System.Exception e)
+                               {
+                                       throw new System.SystemException("cannot load FSDirectory class: " + e.ToString(), e);
+                               }
+                       }
+                       {
+                               try
+                               {
+                                       DIGESTER = SupportClass.Cryptography.GetHashAlgorithm();
+                               }
+                               catch (System.Exception e)
+                               {
+                                       throw new System.SystemException(e.ToString(), e);
+                               }
+                       }
+                       DEFAULT_READ_CHUNK_SIZE = Constants.JRE_IS_64BIT ? System.Int32.MaxValue : 100 * 1024 * 1024;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/FSLockFactory.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/FSLockFactory.cs
new file mode 100644 (file)
index 0000000..e085fd2
--- /dev/null
@@ -0,0 +1,50 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> Base class for file system based locking implementation.</summary>
+       
+       public abstract class FSLockFactory:LockFactory
+       {
+               
+               /// <summary> Directory for the lock files.</summary>
+               protected internal System.IO.DirectoryInfo lockDir = null;
+               
+               /// <summary> Set the lock directory. This method can only be called
+               /// once, to initialize the lock directory. It is used by {@link FSDirectory}
+               /// to set the lock directory to itself.
+               /// Subclasses can also use this method to set the directory
+               /// in the constructor.
+               /// </summary>
+               protected internal virtual void  SetLockDir(System.IO.DirectoryInfo lockDir)
+               {
+                       if (this.lockDir != null)
+                               throw new System.SystemException("You can set the lock directory for this factory only once.");
+                       this.lockDir = lockDir;
+               }
+               
+               /// <summary> Retrieve the lock directory.</summary>
+               public virtual System.IO.DirectoryInfo GetLockDir()
+               {
+                       return lockDir;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/FileSwitchDirectory.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/FileSwitchDirectory.cs
new file mode 100644 (file)
index 0000000..eb7b2a1
--- /dev/null
@@ -0,0 +1,171 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> Expert: A Directory instance that switches files between
+       /// two other Directory instances.
+       /// <p/>Files with the specified extensions are placed in the
+       /// primary directory; others are placed in the secondary
+       /// directory.  The provided Set must not change once passed
+       /// to this class, and must allow multiple threads to call
+       /// contains at once.<p/>
+       /// 
+       /// <p/><b>NOTE</b>: this API is new and experimental and is
+       /// subject to sudden change in the next release.
+       /// </summary>
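+       /// <example>
+       /// A construction sketch; RAMDirectory, indexDir and the extension choice are
+       /// assumptions, not part of this patch. Files whose extensions appear as keys
+       /// in the Hashtable go to the primary directory, all others to the secondary:
+       /// <code>
+       /// System.Collections.Hashtable exts = new System.Collections.Hashtable();
+       /// exts["nrm"] = null;
+       /// exts["tvx"] = null;
+       /// Directory dir = new FileSwitchDirectory(exts, new RAMDirectory(),
+       ///                                         FSDirectory.Open(indexDir), true);
+       /// </code>
+       /// </example>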
+       
+       public class FileSwitchDirectory:Directory
+       {
+               private Directory secondaryDir;
+               private Directory primaryDir;
+               private System.Collections.Hashtable primaryExtensions;
+               private bool doClose;
+               
+               public FileSwitchDirectory(System.Collections.Hashtable primaryExtensions, Directory primaryDir, Directory secondaryDir, bool doClose)
+               {
+                       this.primaryExtensions = primaryExtensions;
+                       this.primaryDir = primaryDir;
+                       this.secondaryDir = secondaryDir;
+                       this.doClose = doClose;
+                       this.lockFactory = primaryDir.GetLockFactory();
+               }
+               
+               /// <summary>Return the primary directory </summary>
+               public virtual Directory GetPrimaryDir()
+               {
+                       return primaryDir;
+               }
+               
+               /// <summary>Return the secondary directory </summary>
+               public virtual Directory GetSecondaryDir()
+               {
+                       return secondaryDir;
+               }
+               
+               public override void  Close()
+               {
+                       if (doClose)
+                       {
+                               try
+                               {
+                                       secondaryDir.Close();
+                               }
+                               finally
+                               {
+                                       primaryDir.Close();
+                               }
+                               doClose = false;
+                       }
+               }
+
+        /// <summary>
+        /// .NET-specific: disposes this directory by delegating to {@link #Close}.
+        /// </summary>
+        public override void Dispose()
+        {
+            Close();
+        }
+               
+               public override System.String[] ListAll()
+               {
+            System.Collections.Generic.List<string> files = new System.Collections.Generic.List<string>();
+            files.AddRange(primaryDir.ListAll());
+            files.AddRange(secondaryDir.ListAll());
+            return files.ToArray();
+               }
+
+        [Obsolete("Mono.Lucene.Net-2.9.1. This method overrides obsolete member Mono.Lucene.Net.Store.Directory.List()")]
+               public override System.String[] List()
+               {
+                       return ListAll();
+               }
+               
+               /// <summary>Utility method to return a file's extension. </summary>
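+               /// <example>
+               /// Illustrative results (the file names are just examples):
+               /// <code>
+               /// GetExtension("_0.cfs");   // returns "cfs"
+               /// GetExtension("segments"); // returns "" (no '.' in the name)
+               /// </code>
+               /// </example>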
+               public static System.String GetExtension(System.String name)
+               {
+                       int i = name.LastIndexOf('.');
+                       if (i == - 1)
+                       {
+                               return "";
+                       }
+                       return name.Substring(i + 1, (name.Length) - (i + 1));
+               }
+               
+               private Directory GetDirectory(System.String name)
+               {
+                       System.String ext = GetExtension(name);
+                       if (primaryExtensions.Contains(ext))
+                       {
+                               return primaryDir;
+                       }
+                       else
+                       {
+                               return secondaryDir;
+                       }
+               }
+               
+               public override bool FileExists(System.String name)
+               {
+                       return GetDirectory(name).FileExists(name);
+               }
+               
+               public override long FileModified(System.String name)
+               {
+                       return GetDirectory(name).FileModified(name);
+               }
+               
+               public override void  TouchFile(System.String name)
+               {
+                       GetDirectory(name).TouchFile(name);
+               }
+               
+               public override void  DeleteFile(System.String name)
+               {
+                       GetDirectory(name).DeleteFile(name);
+               }
+
+        [Obsolete("Mono.Lucene.Net-2.9.1. This method overrides obsolete member Mono.Lucene.Net.Store.Directory.RenameFile(string, string)")]
+               public override void  RenameFile(System.String from, System.String to)
+               {
+                       GetDirectory(from).RenameFile(from, to);
+               }
+               
+               public override long FileLength(System.String name)
+               {
+                       return GetDirectory(name).FileLength(name);
+               }
+               
+               public override IndexOutput CreateOutput(System.String name)
+               {
+                       return GetDirectory(name).CreateOutput(name);
+               }
+               
+               public override void  Sync(System.String name)
+               {
+                       GetDirectory(name).Sync(name);
+               }
+               
+               public override IndexInput OpenInput(System.String name)
+               {
+                       return GetDirectory(name).OpenInput(name);
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/IndexInput.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/IndexInput.cs
new file mode 100644 (file)
index 0000000..133d83e
--- /dev/null
@@ -0,0 +1,277 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary>Abstract base class for input from a file in a {@link Directory}.  A
+       /// random-access input stream.  Used for all Lucene index input operations.
+       /// </summary>
+       /// <seealso cref="Directory">
+       /// </seealso>
+       public abstract class IndexInput : System.ICloneable
+       {
+               private bool preUTF8Strings; // true if we are reading old (modified UTF8) string format
+               
+               /// <summary>Reads and returns a single byte.</summary>
+               /// <seealso cref="IndexOutput.WriteByte(byte)">
+               /// </seealso>
+               public abstract byte ReadByte();
+               
+               /// <summary>Reads a specified number of bytes into an array at the specified offset.</summary>
+               /// <param name="b">the array to read bytes into
+               /// </param>
+               /// <param name="offset">the offset in the array to start storing bytes
+               /// </param>
+               /// <param name="len">the number of bytes to read
+               /// </param>
+               /// <seealso cref="IndexOutput.WriteBytes(byte[],int)">
+               /// </seealso>
+               public abstract void  ReadBytes(byte[] b, int offset, int len);
+               
+               /// <summary>Reads a specified number of bytes into an array at the
+               /// specified offset with control over whether the read
+               /// should be buffered (callers who have their own buffer
+               /// should pass in "false" for useBuffer).  Currently only
+               /// {@link BufferedIndexInput} respects this parameter.
+               /// </summary>
+               /// <param name="b">the array to read bytes into
+               /// </param>
+               /// <param name="offset">the offset in the array to start storing bytes
+               /// </param>
+               /// <param name="len">the number of bytes to read
+               /// </param>
+               /// <param name="useBuffer">set to false if the caller will handle
+               /// buffering.
+               /// </param>
+               /// <seealso cref="IndexOutput.WriteBytes(byte[],int)">
+               /// </seealso>
+               public virtual void  ReadBytes(byte[] b, int offset, int len, bool useBuffer)
+               {
+                       // Default to ignoring useBuffer entirely
+                       ReadBytes(b, offset, len);
+               }
+               
+               /// <summary>Reads four bytes and returns an int.</summary>
+               /// <seealso cref="IndexOutput.WriteInt(int)">
+               /// </seealso>
+               public virtual int ReadInt()
+               {
+                       return ((ReadByte() & 0xFF) << 24) | ((ReadByte() & 0xFF) << 16) | ((ReadByte() & 0xFF) << 8) | (ReadByte() & 0xFF);
+               }
+               
+               /// <summary>Reads an int stored in variable-length format.  Reads between one and
+               /// five bytes.  Smaller values take fewer bytes.  Negative numbers are not
+               /// supported.
+               /// </summary>
+               /// <seealso cref="IndexOutput.WriteVInt(int)">
+               /// </seealso>
+               public virtual int ReadVInt()
+               {
+                       byte b = ReadByte();
+                       int i = b & 0x7F;
+                       for (int shift = 7; (b & 0x80) != 0; shift += 7)
+                       {
+                               b = ReadByte();
+                               i |= (b & 0x7F) << shift;
+                       }
+                       return i;
+               }
+               
+               /// <summary>Reads eight bytes and returns a long.</summary>
+               /// <seealso cref="IndexOutput.WriteLong(long)">
+               /// </seealso>
+               public virtual long ReadLong()
+               {
+                       return (((long) ReadInt()) << 32) | (ReadInt() & 0xFFFFFFFFL);
+               }
+               
+               /// <summary>Reads a long stored in variable-length format.  Reads between one and
+               /// nine bytes.  Smaller values take fewer bytes.  Negative numbers are not
+               /// supported. 
+               /// </summary>
+               public virtual long ReadVLong()
+               {
+                       byte b = ReadByte();
+                       long i = b & 0x7F;
+                       for (int shift = 7; (b & 0x80) != 0; shift += 7)
+                       {
+                               b = ReadByte();
+                               i |= (b & 0x7FL) << shift;
+                       }
+                       return i;
+               }
+               
+               /// <summary>Call this if readString should read characters stored
+               /// in the old modified UTF8 format (length in java chars
+               /// and java's modified UTF8 encoding).  This is used for
+               /// indices written pre-2.4.  See LUCENE-510 for details. 
+               /// </summary>
+               public virtual void  SetModifiedUTF8StringsMode()
+               {
+                       preUTF8Strings = true;
+               }
+               
+               /// <summary>Reads a string.</summary>
+               /// <seealso cref="IndexOutput.WriteString(String)">
+               /// </seealso>
+               public virtual System.String ReadString()
+               {
+                       if (preUTF8Strings)
+                               return ReadModifiedUTF8String();
+                       int length = ReadVInt();
+            byte[] bytes = new byte[length];
+                       ReadBytes(bytes, 0, length);
+            return System.Text.Encoding.UTF8.GetString(bytes, 0, length);
+               }
+               
+               private System.String ReadModifiedUTF8String()
+               {
+                       int length = ReadVInt();
+            char[] chars = new char[length];
+                       ReadChars(chars, 0, length);
+                       return new System.String(chars, 0, length);
+               }
+               
+               /// <summary>Reads Lucene's old "modified UTF-8" encoded
+               /// characters into an array.
+               /// </summary>
+               /// <param name="buffer">the array to read characters into
+               /// </param>
+               /// <param name="start">the offset in the array to start storing characters
+               /// </param>
+               /// <param name="length">the number of characters to read
+               /// </param>
+               /// <seealso cref="IndexOutput.WriteChars(String,int,int)">
+               /// </seealso>
+               /// <deprecated> -- please use readString or readBytes
+               /// instead, and construct the string
+               /// from those utf8 bytes
+               /// </deprecated>
+        [Obsolete("-- please use ReadString or ReadBytes instead, and construct the string from those utf8 bytes")]
+               public virtual void  ReadChars(char[] buffer, int start, int length)
+               {
+                       int end = start + length;
+                       for (int i = start; i < end; i++)
+                       {
+                               byte b = ReadByte();
+                               if ((b & 0x80) == 0)
+                                       buffer[i] = (char) (b & 0x7F);
+                               else if ((b & 0xE0) != 0xE0)
+                               {
+                                       buffer[i] = (char) (((b & 0x1F) << 6) | (ReadByte() & 0x3F));
+                               }
+                               else
+                                       buffer[i] = (char) (((b & 0x0F) << 12) | ((ReadByte() & 0x3F) << 6) | (ReadByte() & 0x3F));
+                       }
+               }
+               
+               /// <summary> Expert
+               /// 
+               /// Similar to {@link #ReadChars(char[], int, int)} but does not do any conversion operations on the bytes it is reading in.  It still
+               /// has to invoke {@link #ReadByte()} just as {@link #ReadChars(char[], int, int)} does, but it does not need a buffer to store anything
+               /// and it does not have to do any of the bitwise operations, since we don't actually care what is in the byte except to determine
+               /// how many more bytes to read.
+               /// </summary>
+               /// <param name="length">The number of chars to read
+               /// </param>
+               /// <deprecated> this method operates on old "modified utf8" encoded
+               /// strings
+               /// </deprecated>
+        [Obsolete("this method operates on old \"modified utf8\" encoded strings")]
+               public virtual void  SkipChars(int length)
+               {
+                       for (int i = 0; i < length; i++)
+                       {
+                               byte b = ReadByte();
+                               if ((b & 0x80) == 0)
+                               {
+                                       //do nothing, we only need one byte
+                               }
+                               else if ((b & 0xE0) != 0xE0)
+                               {
+                                       ReadByte(); //read an additional byte
+                               }
+                               else
+                               {
+                                       //read two additional bytes.
+                                       ReadByte();
+                                       ReadByte();
+                               }
+                       }
+               }
+               
+               
+               /// <summary>Closes the stream to further operations. </summary>
+               public abstract void  Close();
+               
+               /// <summary>Returns the current position in this file, where the next read will
+               /// occur.
+               /// </summary>
+               /// <seealso cref="Seek(long)">
+               /// </seealso>
+               public abstract long GetFilePointer();
+               
+               /// <summary>Sets current position in this file, where the next read will occur.</summary>
+               /// <seealso cref="GetFilePointer()">
+               /// </seealso>
+               public abstract void  Seek(long pos);
+               
+               /// <summary>The number of bytes in the file. </summary>
+               public abstract long Length();
+               
+               /// <summary>Returns a clone of this stream.
+               /// 
+               /// <p/>Clones of a stream access the same data, and are positioned at the same
+               /// point as the stream they were cloned from.
+               /// 
+               /// <p/>Expert: Subclasses must ensure that clones may be positioned at
+               /// different points in the input from each other and from the stream they
+               /// were cloned from.
+               /// </summary>
+               public virtual System.Object Clone()
+               {
+                       IndexInput clone = null;
+                       try
+                       {
+                               clone = (IndexInput) base.MemberwiseClone();
+                       }
+                       catch (System.Exception)
+                       {
+                               // MemberwiseClone is not expected to throw; if it somehow does,
+                               // fall through and return null, as the original port did.
+                       }
+                       
+                       return clone;
+               }
+               
+               // returns Map<String, String>
+               public virtual System.Collections.Generic.IDictionary<string,string> ReadStringStringMap()
+               {
+            System.Collections.Generic.Dictionary<string, string> map = new System.Collections.Generic.Dictionary<string, string>();
+                       int count = ReadInt();
+                       for (int i = 0; i < count; i++)
+                       {
+                               System.String key = ReadString();
+                               System.String val = ReadString();
+                               map[key] = val;
+                       }
+                       
+                       return map;
+               }
+       }
+}
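
ReadVInt and ReadVLong above decode Lucene's variable-length integer format: seven payload bits per byte, least-significant group first, with the high bit marking that another byte follows, so small values occupy a single byte. A self-contained round-trip sketch of that format (the writer mirrors IndexOutput.WriteVInt; names here are illustrative):

    // Minimal VInt round-trip matching the ReadVInt() loop above.
    using System;
    using System.Collections.Generic;

    static class VIntDemo
    {
        static List<byte> Write(int i)
        {
            var bytes = new List<byte>();
            while ((i & ~0x7F) != 0)
            {
                bytes.Add((byte)((i & 0x7F) | 0x80)); // 7 payload bits + continuation bit
                i = (int)((uint)i >> 7);              // unsigned shift, per the port's URShift
            }
            bytes.Add((byte)i);                       // last byte has the high bit clear
            return bytes;
        }

        static int Read(List<byte> bytes)
        {
            int pos = 0;
            byte b = bytes[pos++];
            int i = b & 0x7F;
            for (int shift = 7; (b & 0x80) != 0; shift += 7)
            {
                b = bytes[pos++];
                i |= (b & 0x7F) << shift;
            }
            return i;
        }

        static void Main()
        {
            Console.WriteLine(Read(Write(300))); // 300 -> 0xAC 0x02 -> 300
        }
    }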
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/IndexOutput.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/IndexOutput.cs
new file mode 100644 (file)
index 0000000..a0c31fe
--- /dev/null
@@ -0,0 +1,273 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using UnicodeUtil = Mono.Lucene.Net.Util.UnicodeUtil;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary>Abstract base class for output to a file in a Directory.  A random-access
+       /// output stream.  Used for all Lucene index output operations.
+       /// </summary>
+       /// <seealso cref="Directory">
+       /// </seealso>
+       /// <seealso cref="IndexInput">
+       /// </seealso>
+       public abstract class IndexOutput
+       {
+               /// <summary>Writes a single byte.</summary>
+               /// <seealso cref="IndexInput.ReadByte()">
+               /// </seealso>
+               public abstract void  WriteByte(byte b);
+               
+               /// <summary>Writes an array of bytes.</summary>
+               /// <param name="b">the bytes to write
+               /// </param>
+               /// <param name="length">the number of bytes to write
+               /// </param>
+               /// <seealso cref="IndexInput.ReadBytes(byte[],int,int)">
+               /// </seealso>
+               public virtual void  WriteBytes(byte[] b, int length)
+               {
+                       WriteBytes(b, 0, length);
+               }
+               
+               /// <summary>Writes an array of bytes.</summary>
+               /// <param name="b">the bytes to write
+               /// </param>
+               /// <param name="offset">the offset in the byte array
+               /// </param>
+               /// <param name="length">the number of bytes to write
+               /// </param>
+               /// <seealso cref="IndexInput.ReadBytes(byte[],int,int)">
+               /// </seealso>
+               public abstract void  WriteBytes(byte[] b, int offset, int length);
+               
+               /// <summary>Writes an int as four bytes.</summary>
+               /// <seealso cref="IndexInput.ReadInt()">
+               /// </seealso>
+               public virtual void  WriteInt(int i)
+               {
+                       WriteByte((byte) (i >> 24));
+                       WriteByte((byte) (i >> 16));
+                       WriteByte((byte) (i >> 8));
+                       WriteByte((byte) i);
+               }
+               
+               /// <summary>Writes an int in a variable-length format.  Writes between one and
+               /// five bytes.  Smaller values take fewer bytes.  Negative numbers are not
+               /// supported.
+               /// </summary>
+               /// <seealso cref="IndexInput.ReadVInt()">
+               /// </seealso>
+               public virtual void  WriteVInt(int i)
+               {
+                       while ((i & ~ 0x7F) != 0)
+                       {
+                               WriteByte((byte) ((i & 0x7f) | 0x80));
+                               i = SupportClass.Number.URShift(i, 7);
+                       }
+                       WriteByte((byte) i);
+               }
+               
+               /// <summary>Writes a long as eight bytes.</summary>
+               /// <seealso cref="IndexInput.ReadLong()">
+               /// </seealso>
+               public virtual void  WriteLong(long i)
+               {
+                       WriteInt((int) (i >> 32));
+                       WriteInt((int) i);
+               }
+               
+               /// <summary>Writes a long in a variable-length format.  Writes between one and
+               /// nine bytes.  Smaller values take fewer bytes.  Negative numbers are not
+               /// supported.
+               /// </summary>
+               /// <seealso cref="IndexInput.ReadVLong()">
+               /// </seealso>
+               public virtual void  WriteVLong(long i)
+               {
+                       while ((i & ~ 0x7F) != 0)
+                       {
+                               WriteByte((byte) ((i & 0x7f) | 0x80));
+                               i = SupportClass.Number.URShift(i, 7);
+                       }
+                       WriteByte((byte) i);
+               }
+               
+               /// <summary>Writes a string.</summary>
+               /// <seealso cref="IndexInput.ReadString()">
+               /// </seealso>
+               public virtual void  WriteString(System.String s)
+               {
+            UnicodeUtil.UTF8Result utf8Result = new UnicodeUtil.UTF8Result();
+                       UnicodeUtil.UTF16toUTF8(s, 0, s.Length, utf8Result);
+                       WriteVInt(utf8Result.length);
+                       WriteBytes(utf8Result.result, 0, utf8Result.length);
+               }
+               
+               /// <summary>Writes a sub sequence of characters from s as the old
+               /// format (modified UTF-8 encoded bytes).
+               /// </summary>
+               /// <param name="s">the source of the characters
+               /// </param>
+               /// <param name="start">the first character in the sequence
+               /// </param>
+               /// <param name="length">the number of characters in the sequence
+               /// </param>
+               /// <deprecated> -- please pre-convert to utf8 bytes
+               /// instead or use {@link #writeString}
+               /// </deprecated>
+        [Obsolete("-- please pre-convert to utf8 bytes instead or use WriteString")]
+               public virtual void  WriteChars(System.String s, int start, int length)
+               {
+                       int end = start + length;
+                       for (int i = start; i < end; i++)
+                       {
+                               int code = (int) s[i];
+                               if (code >= 0x01 && code <= 0x7F)
+                                       WriteByte((byte) code);
+                               else if (((code >= 0x80) && (code <= 0x7FF)) || code == 0)
+                               {
+                                       WriteByte((byte) (0xC0 | (code >> 6)));
+                                       WriteByte((byte) (0x80 | (code & 0x3F)));
+                               }
+                               else
+                               {
+                                       WriteByte((byte) (0xE0 | (SupportClass.Number.URShift(code, 12))));
+                                       WriteByte((byte) (0x80 | ((code >> 6) & 0x3F)));
+                                       WriteByte((byte) (0x80 | (code & 0x3F)));
+                               }
+                       }
+               }
+               
+               /// <summary>Writes a sub sequence of characters from char[] as
+               /// the old format (modified UTF-8 encoded bytes).
+               /// </summary>
+               /// <param name="s">the source of the characters
+               /// </param>
+               /// <param name="start">the first character in the sequence
+               /// </param>
+               /// <param name="length">the number of characters in the sequence
+               /// </param>
+               /// <deprecated> -- please pre-convert to utf8 bytes instead or use {@link #writeString}
+               /// </deprecated>
+        [Obsolete("-- please pre-convert to utf8 bytes instead or use WriteString")]
+               public virtual void  WriteChars(char[] s, int start, int length)
+               {
+                       int end = start + length;
+                       for (int i = start; i < end; i++)
+                       {
+                               int code = (int) s[i];
+                               if (code >= 0x01 && code <= 0x7F)
+                                       WriteByte((byte) code);
+                               else if (((code >= 0x80) && (code <= 0x7FF)) || code == 0)
+                               {
+                                       WriteByte((byte) (0xC0 | (code >> 6)));
+                                       WriteByte((byte) (0x80 | (code & 0x3F)));
+                               }
+                               else
+                               {
+                                       WriteByte((byte) (0xE0 | (SupportClass.Number.URShift(code, 12))));
+                                       WriteByte((byte) (0x80 | ((code >> 6) & 0x3F)));
+                                       WriteByte((byte) (0x80 | (code & 0x3F)));
+                               }
+                       }
+               }
+               
+               private const int COPY_BUFFER_SIZE = 16384;
+               private byte[] copyBuffer;
+               
+               /// <summary>Copy numBytes bytes from input to ourself. </summary>
+               public virtual void  CopyBytes(IndexInput input, long numBytes)
+               {
+                       System.Diagnostics.Debug.Assert(numBytes >= 0, "numBytes=" + numBytes);
+                       long left = numBytes;
+                       if (copyBuffer == null)
+                               copyBuffer = new byte[COPY_BUFFER_SIZE];
+                       while (left > 0)
+                       {
+                               int toCopy;
+                               if (left > COPY_BUFFER_SIZE)
+                                       toCopy = COPY_BUFFER_SIZE;
+                               else
+                                       toCopy = (int) left;
+                               input.ReadBytes(copyBuffer, 0, toCopy);
+                               WriteBytes(copyBuffer, 0, toCopy);
+                               left -= toCopy;
+                       }
+               }
+               
+               /// <summary>Forces any buffered output to be written. </summary>
+               public abstract void  Flush();
+               
+               /// <summary>Closes this stream to further operations. </summary>
+               public abstract void  Close();
+               
+               /// <summary>Returns the current position in this file, where the next write will
+               /// occur.
+               /// </summary>
+               /// <seealso cref="Seek(long)">
+               /// </seealso>
+               public abstract long GetFilePointer();
+               
+               /// <summary>Sets current position in this file, where the next write will occur.</summary>
+               /// <seealso cref="GetFilePointer()">
+               /// </seealso>
+               public abstract void  Seek(long pos);
+               
+               /// <summary>The number of bytes in the file. </summary>
+               public abstract long Length();
+               
+               /// <summary>Set the file length. By default, this method does
+               /// nothing (it's optional for a Directory to implement
+               /// it).  But, certain Directory implementations (for
+               /// example <see cref="FSDirectory"/>) can use this to inform the
+               /// underlying IO system to pre-allocate the file to the
+               /// specified size.  If the length is longer than the
+               /// current file length, the bytes added to the file are
+               /// undefined.  Otherwise the file is truncated.
+               /// </summary>
+               /// <param name="length">file length
+               /// </param>
+               public virtual void  SetLength(long length)
+               {
+               }
+               
+               
+               // map must be Map<String, String>
+               public virtual void  WriteStringStringMap(System.Collections.Generic.IDictionary<string,string> map)
+               {
+                       if (map == null)
+                       {
+                               WriteInt(0);
+                       }
+                       else
+                       {
+                               WriteInt(map.Count);
+                foreach (string key in map.Keys)
+                {
+                    WriteString(key);
+                    WriteString(map[key]);
+                }
+                       }
+               }
+       }
+}
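
WriteVInt and WriteVLong above shift with SupportClass.Number.URShift, the port's helper standing in for Java's unsigned right shift (>>>); presumably it amounts to the sketch below. The difference from C#'s >> only shows for negative inputs, where an arithmetic shift would keep the sign bit set and the encode loop would never terminate.

    // Presumed shape of the port's URShift helpers: zero-fill right shift.
    static int URShift(int number, int bits)
    {
        return (int)((uint)number >> bits);
    }

    static long URShift(long number, int bits)
    {
        return (long)((ulong)number >> bits);
    }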
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/Lock.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/Lock.cs
new file mode 100644 (file)
index 0000000..c716e05
--- /dev/null
@@ -0,0 +1,176 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary>An interprocess mutex lock.
+       /// <p/>Typical use might look like:<pre>
+       /// new Lock.With(directory.makeLock("my.lock")) {
+       /// public Object doBody() {
+       /// <i>... code to execute while locked ...</i>
+       /// }
+       /// }.run();
+       /// </pre>
+       /// 
+       /// 
+       /// </summary>
+       /// <version>  $Id: Lock.java 769409 2009-04-28 14:05:43Z mikemccand $
+       /// </version>
+       /// <seealso cref="Directory.MakeLock(String)">
+       /// </seealso>
+       public abstract class Lock
+       {
+               
+               /// <summary>How long {@link #Obtain(long)} waits, in milliseconds,
+               /// in between attempts to acquire the lock. 
+               /// </summary>
+               public static long LOCK_POLL_INTERVAL = 1000;
+               
+               /// <summary>Pass this value to {@link #Obtain(long)} to try
+               /// forever to obtain the lock. 
+               /// </summary>
+               public const long LOCK_OBTAIN_WAIT_FOREVER = -1;
+               
+               /// <summary>Attempts to obtain exclusive access and immediately return
+               /// upon success or failure.
+               /// </summary>
+               /// <returns> true iff exclusive access is obtained
+               /// </returns>
+               public abstract bool Obtain();
+               
+               /// <summary> If a lock obtain attempt fails, this failureReason may be set
+               /// with the "root cause" Exception as to why the lock was
+               /// not obtained.
+               /// </summary>
+               protected internal System.Exception failureReason;
+               
+               /// <summary>Attempts to obtain an exclusive lock within amount of
+               /// time given. Polls once per {@link #LOCK_POLL_INTERVAL}
+               /// (currently 1000) milliseconds until lockWaitTimeout is
+               /// passed.
+               /// </summary>
+               /// <param name="lockWaitTimeout">length of time to wait in
+               /// milliseconds or {@link
+               /// #LOCK_OBTAIN_WAIT_FOREVER} to retry forever
+               /// </param>
+               /// <returns> true if lock was obtained
+               /// </returns>
+               /// <throws>  LockObtainFailedException if lock wait times out </throws>
+               /// <throws>  IllegalArgumentException if lockWaitTimeout is out of bounds </throws>
+               /// <throws>  IOException if obtain() throws IOException </throws>
+               public virtual bool Obtain(long lockWaitTimeout)
+               {
+                       failureReason = null;
+                       bool locked = Obtain();
+                       if (lockWaitTimeout < 0 && lockWaitTimeout != LOCK_OBTAIN_WAIT_FOREVER)
+                               throw new System.ArgumentException("lockWaitTimeout should be LOCK_OBTAIN_WAIT_FOREVER or a non-negative number (got " + lockWaitTimeout + ")");
+                       
+                       long maxSleepCount = lockWaitTimeout / LOCK_POLL_INTERVAL;
+                       long sleepCount = 0;
+                       while (!locked)
+                       {
+                               if (lockWaitTimeout != LOCK_OBTAIN_WAIT_FOREVER && sleepCount++ >= maxSleepCount)
+                               {
+                                       System.String reason = "Lock obtain timed out: " + this.ToString();
+                                       if (failureReason != null)
+                                       {
+                                               reason += (": " + failureReason);
+                                       }
+                    LockObtainFailedException e;
+                    if (failureReason != null)
+                    {
+                        e = new LockObtainFailedException(reason, failureReason);
+                    }
+                    else
+                    {
+                        e = new LockObtainFailedException(reason);
+                    }
+                    throw e;
+                               }
+                               try
+                               {
+                                       System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * LOCK_POLL_INTERVAL));
+                               }
+                               catch (System.Threading.ThreadInterruptedException e)
+                               {
+                                       // In 3.0 we will change this to throw
+                                       // InterruptedException instead
+                                       SupportClass.ThreadClass.Current().Interrupt();
+                                       throw new System.IO.IOException(e.ToString());
+                               }
+                               locked = Obtain();
+                       }
+                       return locked;
+               }
+               
+               /// <summary>Releases exclusive access. </summary>
+               public abstract void  Release();
+               
+               /// <summary>Returns true if the resource is currently locked.  Note that one must
+               /// still call {@link #Obtain()} before using the resource. 
+               /// </summary>
+               public abstract bool IsLocked();
+               
+               
+               /// <summary>Utility class for executing code with exclusive access. </summary>
+               public abstract class With
+               {
+                       private Lock lock_Renamed;
+                       private long lockWaitTimeout;
+                       
+                       
+                       /// <summary>Constructs an executor that will grab the named lock. </summary>
+                       public With(Lock lock_Renamed, long lockWaitTimeout)
+                       {
+                               this.lock_Renamed = lock_Renamed;
+                               this.lockWaitTimeout = lockWaitTimeout;
+                       }
+                       
+                       /// <summary>Code to execute with exclusive access. </summary>
+                       protected internal abstract System.Object DoBody();
+                       
+                       /// <summary>Calls {@link #doBody} while <i>lock</i> is obtained.  Blocks if lock
+                       /// cannot be obtained immediately.  Retries to obtain lock once per second
+                       /// until it is obtained, or until it has tried ten times. Lock is released when
+                       /// {@link #doBody} exits.
+                       /// </summary>
+                       /// <throws>  LockObtainFailedException if lock could not be obtained </throws>
+                       /// <throws>  IOException if {@link Lock#obtain} throws IOException </throws>
+                       public virtual System.Object run()
+                       {
+                               bool locked = false;
+                               try
+                               {
+                                       locked = lock_Renamed.Obtain(lockWaitTimeout);
+                                       return DoBody();
+                               }
+                               finally
+                               {
+                                       if (locked)
+                                               lock_Renamed.Release();
+                               }
+                       }
+               }
+       }
+}
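
Lock.With above is the executor shown in the class summary: subclass it, put the critical section in DoBody(), and call run(), which obtains the lock, runs the body, and releases the lock in a finally block. A hedged usage sketch, assuming some existing Directory instance "dir":

    // Usage sketch for Lock.With; "dir" is an assumed Directory instance.
    using Mono.Lucene.Net.Store;

    class CriticalSection : Lock.With
    {
        public CriticalSection(Lock l)
            : base(l, Lock.LOCK_OBTAIN_WAIT_FOREVER) { }

        protected internal override object DoBody()
        {
            // ... code to execute while locked ...
            return null;
        }
    }

    // elsewhere:
    // object result = new CriticalSection(dir.MakeLock("my.lock")).run();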
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/LockFactory.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/LockFactory.cs
new file mode 100644 (file)
index 0000000..0062a02
--- /dev/null
@@ -0,0 +1,77 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> <p/>Base class for Locking implementation.  {@link Directory} uses
+       /// instances of this class to implement locking.<p/>
+       /// 
+       /// <p/>Note that there are some useful tools to verify that
+       /// your LockFactory is working correctly: {@link
+       /// VerifyingLockFactory}, {@link LockStressTest}, {@link
+       /// LockVerifyServer}.<p/>
+       /// 
+       /// </summary>
+       /// <seealso cref="LockVerifyServer">
+       /// </seealso>
+       /// <seealso cref="LockStressTest">
+       /// </seealso>
+       /// <seealso cref="VerifyingLockFactory">
+       /// </seealso>
+       
+       public abstract class LockFactory
+       {
+               
+               protected internal System.String lockPrefix = null;
+               
+               /// <summary> Set the prefix in use for all locks created in this
+               /// LockFactory.  This is normally called once, when a
+               /// Directory gets this LockFactory instance.  However, you
+               /// can also call this (after this instance is assigned to
+               /// a Directory) to override the prefix in use.  This
+               /// is helpful if you're running Lucene on machines that
+               /// have different mount points for the same shared
+               /// directory.
+               /// </summary>
+               public virtual void  SetLockPrefix(System.String lockPrefix)
+               {
+                       this.lockPrefix = lockPrefix;
+               }
+               
+               /// <summary> Get the prefix in use for all locks created in this LockFactory.</summary>
+               public virtual System.String GetLockPrefix()
+               {
+                       return this.lockPrefix;
+               }
+               
+               /// <summary> Return a new Lock instance identified by lockName.</summary>
+               /// <param name="lockName">name of the lock to be created.
+               /// </param>
+               public abstract Lock MakeLock(System.String lockName);
+               
+               /// <summary> Attempt to clear (forcefully unlock and remove) the
+               /// specified lock.  Only call this at a time when you are
+               /// certain this lock is no longer in use.
+               /// </summary>
+               /// <param name="lockName">name of the lock to be cleared.
+               /// </param>
+               abstract public void  ClearLock(System.String lockName);
+       }
+}
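
A LockFactory implementation only needs MakeLock and ClearLock plus a concrete Lock. As a rough single-process sketch (illustrative only; it resembles, but is not, any factory shipped with the port, and the prefix convention is an assumption):

    // Hedged sketch: an in-process LockFactory keyed by lock name.
    using System.Collections.Generic;
    using Mono.Lucene.Net.Store;

    class InProcessLock : Lock
    {
        private bool locked;
        public override bool Obtain()   { lock (this) { if (locked) return false; return locked = true; } }
        public override void Release()  { lock (this) { locked = false; } }
        public override bool IsLocked() { lock (this) { return locked; } }
    }

    class InProcessLockFactory : LockFactory
    {
        private readonly Dictionary<string, InProcessLock> locks =
            new Dictionary<string, InProcessLock>();

        private string Qualify(string lockName)
        {
            // Assumed convention: apply the configured prefix if present.
            return lockPrefix == null ? lockName : lockPrefix + "-" + lockName;
        }

        public override Lock MakeLock(string lockName)
        {
            lock (locks)
            {
                string name = Qualify(lockName);
                InProcessLock l;
                if (!locks.TryGetValue(name, out l))
                    locks[name] = l = new InProcessLock();
                return l;
            }
        }

        public override void ClearLock(string lockName)
        {
            lock (locks) { locks.Remove(Qualify(lockName)); }
        }
    }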
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/LockObtainFailedException.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/LockObtainFailedException.cs
new file mode 100644 (file)
index 0000000..bafa2a2
--- /dev/null
@@ -0,0 +1,41 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> This exception is thrown when the <code>write.lock</code>
+       /// could not be acquired.  This
+       /// happens when a writer tries to open an index
+       /// that another writer already has open.
+       /// </summary>
+       /// <seealso cref="Lock.Obtain(long)">
+       /// </seealso>
+       [Serializable]
+       public class LockObtainFailedException:System.IO.IOException
+       {
+               public LockObtainFailedException(System.String message):base(message)
+               {
+               }
+
+        public LockObtainFailedException(System.String message, System.Exception ex) : base(message, ex)
+        {
+        }
+    }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/LockReleaseFailedException.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/LockReleaseFailedException.cs
new file mode 100644 (file)
index 0000000..69df5ad
--- /dev/null
@@ -0,0 +1,35 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> This exception is thrown when the <code>write.lock</code>
+       /// could not be released.
+       /// </summary>
+       /// <seealso cref="Lock.Release()">
+       /// </seealso>
+       [Serializable]
+       public class LockReleaseFailedException:System.IO.IOException
+       {
+               public LockReleaseFailedException(System.String message):base(message)
+               {
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/LockStressTest.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/LockStressTest.cs
new file mode 100644 (file)
index 0000000..90d45f2
--- /dev/null
@@ -0,0 +1,128 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> Simple standalone tool that forever acquires &amp; releases a
+       /// lock using a specific LockFactory.  Run without any args
+       /// to see usage.
+       /// 
+       /// </summary>
+       /// <seealso cref="VerifyingLockFactory">
+       /// </seealso>
+       /// <seealso cref="LockVerifyServer">
+       /// </seealso>
+       
+       public class LockStressTest
+       {
+               
+               [STAThread]
+               public static void  Main(System.String[] args)
+               {
+                       
+                       if (args.Length != 6)
+                       {
+                               System.Console.Out.WriteLine("\nUsage: java Mono.Lucene.Net.Store.LockStressTest myID verifierHostOrIP verifierPort lockFactoryClassName lockDirName sleepTimeMS\n" + "\n" + "  myID = int from 0 .. 255 (should be unique for test process)\n" + "  verifierHostOrIP = host name or IP address where LockVerifyServer is running\n" + "  verifierPort = port that LockVerifyServer is listening on\n" + "  lockFactoryClassName = primary LockFactory class that we will use\n" + "  lockDirName = path to the lock directory (only used by Simple/NativeFSLockFactory)\n" + "  sleepTimeMS = milliseconds to pause between each lock obtain/release\n" + "\n" + "You should run multiple instances of this process, each with its own\n" + "unique ID, and each pointing to the same lock directory, to verify\n" + "that locking is working correctly.\n" + "\n" + "Make sure you are first running LockVerifyServer.\n" + "\n");
+                               System.Environment.Exit(1);
+                       }
+                       
+                       int myID = System.Int32.Parse(args[0]);
+                       
+                       if (myID < 0 || myID > 255)
+                       {
+                               System.Console.Out.WriteLine("myID must be a unique int 0..255");
+                               System.Environment.Exit(1);
+                       }
+                       
+                       System.String verifierHost = args[1];
+                       int verifierPort = System.Int32.Parse(args[2]);
+                       System.String lockFactoryClassName = args[3];
+                       System.String lockDirName = args[4];
+                       int sleepTimeMS = System.Int32.Parse(args[5]);
+                       
+                       System.Type c;
+                       try
+                       {
+                               c = System.Type.GetType(lockFactoryClassName);
+                       }
+                       catch (System.Exception e)
+                       {
+                               throw new System.IO.IOException("unable to find LockClass " + lockFactoryClassName);
+                       }
+                       
+                       LockFactory lockFactory;
+                       try
+                       {
+                               lockFactory = (LockFactory) System.Activator.CreateInstance(c);
+                       }
+                       catch (System.UnauthorizedAccessException e)
+                       {
+                               throw new System.IO.IOException("IllegalAccessException when instantiating LockClass " + lockFactoryClassName);
+                       }
+                       catch (System.InvalidCastException e)
+                       {
+                               throw new System.IO.IOException("unable to cast LockClass " + lockFactoryClassName + " instance to a LockFactory");
+                       }
+                       catch (System.Exception e)
+                       {
+                               throw new System.IO.IOException("InstantiationException when instantiating LockClass " + lockFactoryClassName);
+                       }
+                       
+                       System.IO.DirectoryInfo lockDir = new System.IO.DirectoryInfo(lockDirName);
+                       
+                       if (lockFactory is NativeFSLockFactory)
+                       {
+                               ((NativeFSLockFactory) lockFactory).SetLockDir(lockDir);
+                       }
+                       else if (lockFactory is SimpleFSLockFactory)
+                       {
+                               ((SimpleFSLockFactory) lockFactory).SetLockDir(lockDir);
+                       }
+                       
+                       lockFactory.SetLockPrefix("test");
+                       
+                       LockFactory verifyLF = new VerifyingLockFactory((sbyte) myID, lockFactory, verifierHost, verifierPort);
+                       
+                       Lock l = verifyLF.MakeLock("test.lock");
+                       
+                       while (true)
+                       {
+                               
+                               bool obtained = false;
+                               
+                               try
+                               {
+                                       obtained = l.Obtain(10);
+                               }
+                               catch (LockObtainFailedException e)
+                               {
+                                       System.Console.Out.Write("x");
+                               }
+                               
+                               if (obtained)
+                               {
+                                       System.Console.Out.Write("l");
+                                       l.Release();
+                               }
+                               System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * sleepTimeMS));
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/LockVerifyServer.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/LockVerifyServer.cs
new file mode 100644 (file)
index 0000000..e587520
--- /dev/null
@@ -0,0 +1,110 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> Simple standalone server that must be running when you
+       /// use {@link VerifyingLockFactory}.  This server simply
+       /// verifies at most one process holds the lock at a time.
+       /// Run without any args to see usage.
+       /// 
+       /// </summary>
+       /// <seealso cref="VerifyingLockFactory">
+       /// </seealso>
+       /// <seealso cref="LockStressTest">
+       /// </seealso>
+       
+       public class LockVerifyServer
+       {
+               
+               private static System.String GetTime(long startTime)
+               {
+                       return "[" + (((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) - startTime) / 1000) + "s] ";
+               }
+               
+               [STAThread]
+               public static void  Main(System.String[] args)
+               {
+                       
+                       if (args.Length != 1)
+                       {
+                               System.Console.Out.WriteLine("\nUsage: java Mono.Lucene.Net.Store.LockVerifyServer port\n");
+                               System.Environment.Exit(1);
+                       }
+                       
+                       int port = System.Int32.Parse(args[0]);
+                       
+                       System.Net.Sockets.TcpListener temp_tcpListener;
+                       temp_tcpListener = new System.Net.Sockets.TcpListener(System.Net.Dns.GetHostEntry(System.Net.Dns.GetHostName()).AddressList[0], port);
+                       temp_tcpListener.Server.SetSocketOption(System.Net.Sockets.SocketOptionLevel.Socket, System.Net.Sockets.SocketOptionName.ReuseAddress, 1);
+                       temp_tcpListener.Start();
+                       System.Net.Sockets.TcpListener s = temp_tcpListener;
+                       System.Console.Out.WriteLine("\nReady on port " + port + "...");
+                       
+                       int lockedID = 0;
+                       long startTime = (DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond);
+                       
+                       while (true)
+                       {
+                               System.Net.Sockets.TcpClient cs = s.AcceptTcpClient();
+                               System.IO.Stream out_Renamed = cs.GetStream();
+                               System.IO.Stream in_Renamed = cs.GetStream();
+                               
+                               int id = in_Renamed.ReadByte();
+                               int command = in_Renamed.ReadByte();
+                               
+                               bool err = false;
+                               
+                               if (command == 1)
+                               {
+                                       // Locked
+                                       if (lockedID != 0)
+                                       {
+                                               err = true;
+                                               System.Console.Out.WriteLine(GetTime(startTime) + " ERROR: id " + id + " got lock, but " + lockedID + " already holds the lock");
+                                       }
+                                       lockedID = id;
+                               }
+                               else if (command == 0)
+                               {
+                                       if (lockedID != id)
+                                       {
+                                               err = true;
+                                               System.Console.Out.WriteLine(GetTime(startTime) + " ERROR: id " + id + " released the lock, but " + lockedID + " is the one holding the lock");
+                                       }
+                                       lockedID = 0;
+                               }
+                               else
+                                       throw new System.SystemException("unrecognized command " + command);
+                               
+                               System.Console.Out.Write(".");
+                               
+                               if (err)
+                                       out_Renamed.WriteByte((System.Byte) 1);
+                               else
+                                       out_Renamed.WriteByte((System.Byte) 0);
+                               
+                               out_Renamed.Close();
+                               in_Renamed.Close();
+                               cs.Close();
+                       }
+               }
+       }
+}
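
The server loop above defines a two-byte wire protocol: each connection sends an id byte followed by a command byte (1 = lock obtained, 0 = lock released), and the server replies with a single byte, 1 if it detected a violation and 0 otherwise. A hedged client-side sketch of that exchange follows (the class and method names are illustrative; the real client is VerifyingLockFactory):

    // Client side of the verify protocol implemented by the loop above.
    using System.Net.Sockets;

    static class LockVerifyClient
    {
        // Returns true if the server saw no violation.
        public static bool Report(string host, int port, byte id, byte command)
        {
            using (TcpClient client = new TcpClient(host, port))
            using (NetworkStream stream = client.GetStream())
            {
                stream.WriteByte(id);       // who we are (0..255)
                stream.WriteByte(command);  // 1 = obtained, 0 = released
                return stream.ReadByte() == 0;
            }
        }
    }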
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/MMapDirectory.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/MMapDirectory.cs
new file mode 100644 (file)
index 0000000..a61218b
--- /dev/null
@@ -0,0 +1,561 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Constants = Mono.Lucene.Net.Util.Constants;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary>File-based {@link Directory} implementation that uses
+       /// mmap for reading, and {@link
+       /// SimpleFSDirectory.SimpleFSIndexOutput} for writing.
+       /// 
+       /// <p/><b>NOTE</b>: memory mapping uses up a portion of the
+       /// virtual memory address space in your process equal to the
+       /// size of the file being mapped.  Before using this class,
+       /// be sure you have plenty of virtual address space, e.g. by
+       /// using a 64 bit JRE, or a 32 bit JRE with indexes that are
+       /// guaranteed to fit within the address space.
+       /// On 32 bit platforms also consult {@link #setMaxChunkSize}
+       /// if you have problems with mmap failing because of fragmented
+       /// address space. If you get an OutOfMemoryException, it is recommended
+       /// to reduce the chunk size, until it works.
+       /// 
+       /// <p/>Due to <a href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4724038">
+       /// this bug</a> in Sun's JRE, MMapDirectory's {@link IndexInput#close}
+       /// is unable to close the underlying OS file handle.  Only when GC
+       /// finally collects the underlying objects, which could be quite
+       /// some time later, will the file handle be closed.
+       /// 
+       /// <p/>This will consume additional transient disk usage: on Windows,
+       /// attempts to delete or overwrite the files will result in an
+       /// exception; on other platforms, which typically have a &quot;delete on
+       /// last close&quot; semantics, while such operations will succeed, the bytes
+       /// are still consuming space on disk.  For many applications this
+       /// limitation is not a problem (e.g. if you have plenty of disk space,
+       /// and you don't rely on overwriting files on Windows) but it's still
+       /// an important limitation to be aware of.
+       /// 
+       /// <p/>This class supplies the workaround mentioned in the bug report
+       /// (disabled by default, see {@link #setUseUnmap}), which may fail on
+       /// non-Sun JVMs. It forcefully unmaps the buffer on close by using
+       /// an undocumented internal cleanup functionality.
+       /// {@link #UNMAP_SUPPORTED} is <code>true</code>, if the workaround
+       /// can be enabled (with no guarantees).
+       /// </summary>
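+       /// <example>A hedged usage sketch; the index path and file name below
+       /// are illustrative, not part of the API:
+       /// <code>
+       /// var dir = new MMapDirectory(new System.IO.DirectoryInfo("/tmp/index"));
+       /// IndexInput input = dir.OpenInput("segments.gen", 1024);
+       /// try { byte b = input.ReadByte(); } finally { input.Close(); }
+       /// </code>
+       /// </example>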
+       public class MMapDirectory:FSDirectory
+       {
+               private class AnonymousClassPrivilegedExceptionAction // : SupportClass.IPriviligedAction   // {{Aroush-2.9}}
+               {
+                       public AnonymousClassPrivilegedExceptionAction(byte[] buffer, MMapDirectory enclosingInstance)
+                       {
+                               InitBlock(buffer, enclosingInstance);
+                       }
+                       private void  InitBlock(byte[] buffer, MMapDirectory enclosingInstance)
+                       {
+                               this.buffer = buffer;
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private byte[] buffer;
+                       private MMapDirectory enclosingInstance;
+                       public MMapDirectory Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       public virtual System.Object Run()
+                       {
+                // {{Aroush-2.9
+                /*
+                               System.Reflection.MethodInfo getCleanerMethod = buffer.GetType().GetMethod("cleaner", (Mono.Lucene.Net.Store.MMapDirectory.NO_PARAM_TYPES == null)?new System.Type[0]:(System.Type[]) Mono.Lucene.Net.Store.MMapDirectory.NO_PARAM_TYPES);
+                getCleanerMethod.SetAccessible(true);
+                               System.Object cleaner = getCleanerMethod.Invoke(buffer, (System.Object[]) Mono.Lucene.Net.Store.MMapDirectory.NO_PARAMS);
+                               if (cleaner != null)
+                               {
+                                       cleaner.GetType().GetMethod("clean", (Mono.Lucene.Net.Store.MMapDirectory.NO_PARAM_TYPES == null)?new System.Type[0]:(System.Type[]) Mono.Lucene.Net.Store.MMapDirectory.NO_PARAM_TYPES).Invoke(cleaner, (System.Object[]) Mono.Lucene.Net.Store.MMapDirectory.NO_PARAMS);
+                               }
+                */
+                System.Diagnostics.Debug.Fail("Port issue:", "sun.misc.Cleaner()"); // {{Aroush-2.9}}
+                // Aroush-2.9}}
+                               return null;
+                       }
+               }
+               private void  InitBlock()
+               {
+                       maxBBuf = Constants.JRE_IS_64BIT?System.Int32.MaxValue:(256 * 1024 * 1024);
+               }
+               
+               /// <summary>Create a new MMapDirectory for the named location.
+               /// 
+               /// </summary>
+               /// <param name="path">the path of the directory
+               /// </param>
+               /// <param name="lockFactory">the lock factory to use, or null for the default.
+               /// </param>
+               /// <throws>  IOException </throws>
+               [System.Obsolete("Use the constructor that takes a DirectoryInfo, this will be removed in the 3.0 release")]
+               public MMapDirectory(System.IO.FileInfo path, LockFactory lockFactory):base(new System.IO.DirectoryInfo(path.FullName), lockFactory)
+               {
+                       InitBlock();
+               }
+               
+        /// <summary>Create a new MMapDirectory for the named location.
+        /// 
+        /// </summary>
+        /// <param name="path">the path of the directory
+        /// </param>
+        /// <param name="lockFactory">the lock factory to use, or null for the default.
+        /// </param>
+        /// <throws>  IOException </throws>
+        public MMapDirectory(System.IO.DirectoryInfo path, LockFactory lockFactory) : base(path, lockFactory)
+        {
+            InitBlock();
+        }
+               
+               /// <summary>Create a new MMapDirectory for the named location and the default lock factory.
+               /// 
+               /// </summary>
+               /// <param name="path">the path of the directory
+               /// </param>
+               /// <throws>  IOException </throws>
+               [System.Obsolete("Use the constructor that takes a DirectoryInfo, this will be removed in the 3.0 release")]
+               public MMapDirectory(System.IO.FileInfo path):base(new System.IO.DirectoryInfo(path.FullName), null)
+               {
+                       InitBlock();
+               }
+               
+        /// <summary>Create a new MMapDirectory for the named location and the default lock factory.
+        /// 
+        /// </summary>
+        /// <param name="path">the path of the directory
+        /// </param>
+        /// <throws>  IOException </throws>
+        public MMapDirectory(System.IO.DirectoryInfo path) : base(path, null)
+        {
+            InitBlock();
+        }
+               
+               // back compatibility so FSDirectory can instantiate via reflection
+               /// <deprecated> 
+               /// </deprecated>
+        [Obsolete]
+               internal MMapDirectory()
+               {
+                       InitBlock();
+               }
+               
+               internal static readonly System.Type[] NO_PARAM_TYPES = new System.Type[0];
+               internal static readonly System.Object[] NO_PARAMS = new System.Object[0];
+               
+               private bool useUnmapHack = false;
+               private int maxBBuf;
+               
+               /// <summary> <code>true</code>, if this platform supports unmapping mmaped files.</summary>
+               public static bool UNMAP_SUPPORTED;
+               
+               /// <summary> This method enables the workaround for unmapping the buffers
+               /// from address space after closing {@link IndexInput}, that is
+               /// mentioned in the bug report. This hack may fail on non-Sun JVMs.
+               /// It forcefully unmaps the buffer on close by using
+               /// an undocumented internal cleanup functionality.
+               /// <p/><b>NOTE:</b> Enabling this is completely unsupported
+               /// by Java and may lead to JVM crashes if <code>IndexInput</code>
+               /// is closed while another thread is still accessing it (SIGSEGV).
+               /// </summary>
+               /// <throws> IllegalArgumentException if {@link #UNMAP_SUPPORTED}
+               /// is <code>false</code> and the workaround cannot be enabled. </throws>
+               public virtual void  SetUseUnmap(bool useUnmapHack)
+               {
+                       if (useUnmapHack && !UNMAP_SUPPORTED)
+                               throw new System.ArgumentException("Unmap hack not supported on this platform!");
+                       this.useUnmapHack = useUnmapHack;
+               }
+               
+               /// <summary> Returns <code>true</code>, if the unmap workaround is enabled.</summary>
+               /// <seealso cref="setUseUnmap">
+               /// </seealso>
+               public virtual bool GetUseUnmap()
+               {
+                       return useUnmapHack;
+               }
+               
+               /// <summary> Try to unmap the buffer; this method silently fails if the
+               /// JVM has no support for it. On Windows, this means that mmapped
+               /// files cannot be modified or deleted.
+               /// </summary>
+               internal void  CleanMapping(System.IO.MemoryStream buffer)
+               {
+                       if (useUnmapHack)
+                       {
+                               try
+                               {
+                    // {{Aroush-2.9}} Not converted: java.security.AccessController.doPrivileged()
+                    System.Diagnostics.Debug.Fail("Port issue:", "java.security.AccessController.doPrivileged()"); // {{Aroush-2.9}}
+                                       // AccessController.DoPrivileged(new AnonymousClassPrivilegedExceptionAction(buffer, this));
+                               }
+                               catch (System.Exception e)
+                               {
+                                       System.IO.IOException ioe = new System.IO.IOException("unable to unmap the mapped buffer", e.InnerException);
+                                       throw ioe;
+                               }
+                       }
+               }
+               
+               /// <summary> Sets the maximum chunk size (default is {@link Integer#MAX_VALUE} for
+               /// 64 bit JVMs and 256 MiBytes for 32 bit JVMs) used for memory mapping.
+               /// Especially on 32 bit platform, the address space can be very fragmented,
+               /// so large index files cannot be mapped.
+               /// Using a lower chunk size makes the directory implementation a little
+               /// bit slower (as the correct chunk must be resolved on each seek)
+               /// but the chance is higher that mmap does not fail. On 64 bit
+               /// Java platforms, this parameter should always be {@link Integer#MAX_VALUE},
+               /// as the address space is big enough.
+               /// </summary>
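+               /// <example>A hedged sketch; the 128 MiB value is purely illustrative:
+               /// <code>
+               /// var dir = new MMapDirectory(new System.IO.DirectoryInfo("/tmp/index"));
+               /// if (!Constants.JRE_IS_64BIT)
+               ///     dir.SetMaxChunkSize(128 * 1024 * 1024); // smaller chunks dodge 32 bit fragmentation
+               /// </code>
+               /// </example>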
+               public virtual void  SetMaxChunkSize(int maxBBuf)
+               {
+                       if (maxBBuf <= 0)
+                               throw new System.ArgumentException("Maximum chunk size for mmap must be >0");
+                       this.maxBBuf = maxBBuf;
+               }
+               
+               /// <summary> Returns the current mmap chunk size.</summary>
+               /// <seealso cref="setMaxChunkSize">
+               /// </seealso>
+               public virtual int GetMaxChunkSize()
+               {
+                       return maxBBuf;
+               }
+               
+               private class MMapIndexInput:IndexInput, System.ICloneable
+               {
+                       private void  InitBlock(MMapDirectory enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private MMapDirectory enclosingInstance;
+                       public MMapDirectory Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       
+                       private System.IO.MemoryStream buffer;
+                       private long length;
+                       private bool isClone = false;
+                       
+                       internal MMapIndexInput(MMapDirectory enclosingInstance, System.IO.FileStream raf)
+                       {
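+                               // Port note: unlike the Java original, this port does not mmap at
+                               // all; it reads the entire file into a MemoryStream up front.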
+                byte[] data = new byte[raf.Length];
+                raf.Read(data, 0, (int) raf.Length);
+
+                               InitBlock(enclosingInstance);
+                               this.length = raf.Length;
+                               this.buffer = new System.IO.MemoryStream(data);
+                       }
+                       
+                       public override byte ReadByte()
+                       {
+                               try
+                               {
+                                       return (byte) buffer.ReadByte();
+                               }
+                               catch (ObjectDisposedException)
+                               {
+                                       throw new System.IO.IOException("read past EOF");
+                               }
+                       }
+                       
+                       public override void  ReadBytes(byte[] b, int offset, int len)
+                       {
+                               try
+                               {
+                                       buffer.Read(b, offset, len);
+                               }
+                               catch (ObjectDisposedException)
+                               {
+                                       throw new System.IO.IOException("read past EOF");
+                               }
+                       }
+                       
+                       public override long GetFilePointer()
+                       {
+                               return buffer.Position;
+                       }
+                       
+                       public override void  Seek(long pos)
+                       {
+                               buffer.Seek(pos, System.IO.SeekOrigin.Begin);
+                       }
+                       
+                       public override long Length()
+                       {
+                               return length;
+                       }
+                       
+                       public override System.Object Clone()
+                       {
+                if (buffer == null)
+                    throw new AlreadyClosedException("MMapIndexInput already closed");
+                               MMapIndexInput clone = (MMapIndexInput) base.Clone();
+                               clone.isClone = true;
+                               // clone.buffer = buffer.duplicate();   // {{Aroush-1.9}}
+                               return clone;
+                       }
+                       
+                       public override void  Close()
+                       {
+                               if (isClone || buffer == null)
+                                       return ;
+                               // unmap the buffer (if enabled) and at least unset it for GC
+                               try
+                               {
+                                       Enclosing_Instance.CleanMapping(buffer);
+                               }
+                               finally
+                               {
+                                       buffer = null;
+                               }
+                       }
+               }
+               
+               // Because Java's ByteBuffer uses an int to address the
+               // values, it's necessary to access a file >
+               // Integer.MAX_VALUE in size using multiple byte buffers.
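+               // For example, a 5 GB file with maxBufSize = 1 GB is addressed as five
+               // buffers, a position being decomposed into (buffer index, offset).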
+               private class MultiMMapIndexInput:IndexInput, System.ICloneable
+               {
+                       private void  InitBlock(MMapDirectory enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private MMapDirectory enclosingInstance;
+                       public MMapDirectory Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       
+                       private System.IO.MemoryStream[] buffers;
+                       private int[] bufSizes; // keep here, ByteBuffer.size() method is optional
+                       
+                       private long length;
+                       
+                       private int curBufIndex;
+                       private int maxBufSize;
+                       
+                       private System.IO.MemoryStream curBuf; // redundant for speed: buffers[curBufIndex]
+                       private int curAvail; // redundant for speed: (bufSizes[curBufIndex] - curBuf.position())
+                       
+                       private bool isClone = false;
+                       
+                       public MultiMMapIndexInput(MMapDirectory enclosingInstance, System.IO.FileStream raf, int maxBufSize)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.length = raf.Length;
+                               this.maxBufSize = maxBufSize;
+                               
+                               if (maxBufSize <= 0)
+                                       throw new System.ArgumentException("Non-positive maxBufSize: " + maxBufSize);
+                               
+                               if ((length / maxBufSize) > System.Int32.MaxValue)
+                               {
+                                       throw new System.ArgumentException("RandomAccessFile too big for maximum buffer size: " + raf.ToString());
+                               }
+                               
+                               int nrBuffers = (int) (length / maxBufSize);
+                               if (((long) nrBuffers * maxBufSize) < length)
+                                       nrBuffers++;
+                               
+                               this.buffers = new System.IO.MemoryStream[nrBuffers];
+                               this.bufSizes = new int[nrBuffers];
+                               
+                               long bufferStart = 0;
+                               for (int bufNr = 0; bufNr < nrBuffers; bufNr++)
+                               {
+                                       int bufSize = (length > (bufferStart + maxBufSize)) ? maxBufSize : (int) (length - bufferStart);
+                                       // Read only this chunk's bytes; allocating raf.Length per buffer,
+                                       // as the unported code did, put the whole file into the first
+                                       // buffer and left every later buffer empty.
+                                       byte[] data = new byte[bufSize];
+                                       raf.Read(data, 0, bufSize);
+                                       this.buffers[bufNr] = new System.IO.MemoryStream(data);
+                                       this.bufSizes[bufNr] = bufSize;
+                                       bufferStart += bufSize;
+                               }
+                               Seek(0L);
+                       }
+                       
+                       public override byte ReadByte()
+                       {
+                               // Performance might be improved by reading ahead into an array of
+                               // e.g. 128 bytes and readByte() from there.
+                               if (curAvail == 0)
+                               {
+                                       curBufIndex++;
+                                       if (curBufIndex >= buffers.Length)
+                                               throw new System.IO.IOException("read past EOF");
+                                       curBuf = buffers[curBufIndex];
+                                       curBuf.Seek(0, System.IO.SeekOrigin.Begin);
+                                       curAvail = bufSizes[curBufIndex];
+                               }
+                               curAvail--;
+                               return (byte) curBuf.ReadByte();
+                       }
+                       
+                       public override void  ReadBytes(byte[] b, int offset, int len)
+                       {
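+                               // A read may span buffer boundaries: drain what remains of the
+                               // current buffer, hop to the next, then finish with the tail read.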
+                               while (len > curAvail)
+                               {
+                                       curBuf.Read(b, offset, curAvail);
+                                       len -= curAvail;
+                                       offset += curAvail;
+                                       curBufIndex++;
+                                       if (curBufIndex >= buffers.Length)
+                                               throw new System.IO.IOException("read past EOF");
+                                       curBuf = buffers[curBufIndex];
+                                       curBuf.Seek(0, System.IO.SeekOrigin.Begin);
+                                       curAvail = bufSizes[curBufIndex];
+                               }
+                               curBuf.Read(b, offset, len);
+                               curAvail -= len;
+                       }
+                       
+                       public override long GetFilePointer()
+                       {
+                               return ((long) curBufIndex * maxBufSize) + curBuf.Position;
+                       }
+                       
+                       public override void  Seek(long pos)
+                       {
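+                               // Example: with maxBufSize = 4 and pos = 10, curBufIndex = 10 / 4 = 2
+                               // and bufOffset = 10 - 2 * 4 = 2, i.e. two bytes into the third buffer.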
+                               curBufIndex = (int) (pos / maxBufSize);
+                               curBuf = buffers[curBufIndex];
+                               int bufOffset = (int) (pos - ((long) curBufIndex * maxBufSize));
+                               curBuf.Seek(bufOffset, System.IO.SeekOrigin.Begin);
+                               curAvail = bufSizes[curBufIndex] - bufOffset;
+                       }
+                       
+                       public override long Length()
+                       {
+                               return length;
+                       }
+                       
+                       public override System.Object Clone()
+                       {
+                               MultiMMapIndexInput clone = (MultiMMapIndexInput) base.Clone();
+                               clone.isClone = true;
+                               clone.buffers = new System.IO.MemoryStream[buffers.Length];
+                               // No need to clone bufSizes.
+                               // Since most clones will use only one buffer, duplicate() could also be
+                               // done lazy in clones, e.g. when adapting curBuf.
+                               for (int bufNr = 0; bufNr < buffers.Length; bufNr++)
+                               {
+                                       clone.buffers[bufNr] = buffers[bufNr];    // clone.buffers[bufNr] = buffers[bufNr].duplicate();   // {{Aroush-1.9}} how do we clone?!
+                               }
+                               try
+                               {
+                                       clone.Seek(GetFilePointer());
+                               }
+                               catch (System.IO.IOException ioe)
+                               {
+                                       System.SystemException newException = new System.SystemException(ioe.Message, ioe);
+                                       throw newException;
+                               }
+                               return clone;
+                       }
+                       
+                       public override void  Close()
+                       {
+                               if (isClone || buffers == null)
+                                       return ;
+                               try
+                               {
+                                       for (int bufNr = 0; bufNr < buffers.Length; bufNr++)
+                                       {
+                                               // unmap the buffer (if enabled) and at least unset it for GC
+                                               try
+                                               {
+                                                       Enclosing_Instance.CleanMapping(buffers[bufNr]);
+                                               }
+                                               finally
+                                               {
+                                                       buffers[bufNr] = null;
+                                               }
+                                       }
+                               }
+                               finally
+                               {
+                                       buffers = null;
+                               }
+                       }
+               }
+               
+               /// <summary>Creates an IndexInput for the file with the given name. </summary>
+               public override IndexInput OpenInput(System.String name, int bufferSize)
+               {
+                       EnsureOpen();
+                       System.String path = System.IO.Path.Combine(GetDirectory().FullName, name);
+                       System.IO.FileStream raf = new System.IO.FileStream(path, System.IO.FileMode.Open, System.IO.FileAccess.Read);
+                       try
+                       {
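+                               // Files that fit into one chunk use the single-buffer input; larger
+                               // files are split across buffers by MultiMMapIndexInput.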
+                               return (raf.Length <= (long) maxBBuf)?(IndexInput) new MMapIndexInput(this, raf):(IndexInput) new MultiMMapIndexInput(this, raf, maxBBuf);
+                       }
+                       finally
+                       {
+                               raf.Close();
+                       }
+               }
+               
+               /// <summary>Creates an IndexOutput for the file with the given name. </summary>
+               public override IndexOutput CreateOutput(System.String name)
+               {
+                       InitOutput(name);
+                       return new SimpleFSDirectory.SimpleFSIndexOutput(new System.IO.FileInfo(System.IO.Path.Combine(directory.FullName, name)));
+               }
+               static MMapDirectory()
+               {
+                       {
+                               bool v;
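+                               // Port note: the reflection probe from the Java original is stubbed
+                               // out below; Debug.Fail only fires in debug builds, so v normally
+                               // ends up true here regardless of actual platform support.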
+                               try
+                               {
+                    // {{Aroush-2.9
+                                       /*
+                    System.Type.GetType("sun.misc.Cleaner"); // {{Aroush-2.9}} port issue?
+                                       System.Type.GetType("java.nio.DirectByteBuffer").GetMethod("cleaner", (NO_PARAM_TYPES == null)?new System.Type[0]:(System.Type[]) NO_PARAM_TYPES);
+                    */
+                    System.Diagnostics.Debug.Fail("Port issue:", "sun.misc.Cleaner.clean()"); // {{Aroush-2.9}}
+                    // Aroush-2.9}}
+                                       v = true;
+                               }
+                               catch (System.Exception)
+                               {
+                                       v = false;
+                               }
+                               UNMAP_SUPPORTED = v;
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/NIOFSDirectory.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/NIOFSDirectory.cs
new file mode 100644 (file)
index 0000000..0897ea3
--- /dev/null
@@ -0,0 +1,267 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+namespace Mono.Lucene.Net.Store
+{
+    /// <summary>
+    /// Not implemented. Waiting for volunteers.
+    /// </summary>
+    public class NIOFSDirectory : Mono.Lucene.Net.Store.FSDirectory
+    {
+        public NIOFSDirectory()
+        {
+            throw new System.NotImplementedException("Waiting for volunteers to implement this class");
+        }
+
+        public NIOFSDirectory(System.IO.DirectoryInfo dir, LockFactory lockFactory)
+        {
+            // Kept for signature compatibility, but just as unimplemented as the default ctor.
+            throw new System.NotImplementedException("Waiting for volunteers to implement this class");
+        }
+
+        /// <summary>
+        /// Not implemented. Waiting for volunteers.
+        /// </summary>
+        public class NIOFSIndexInput
+        {
+            public NIOFSIndexInput()
+            {
+                throw new System.NotImplementedException("Waiting for volunteers to implement this class");
+            }
+        }
+    }
+}
+
+
+//namespace Mono.Lucene.Net.Store
+//{
+       
+//    /// <summary> An {@link FSDirectory} implementation that uses
+//    /// java.nio's FileChannel's positional read, which allows
+//    /// multiple threads to read from the same file without
+//    /// synchronizing.
+//    /// 
+//    /// <p/>This class only uses FileChannel when reading; writing
+//    /// is achieved with {@link SimpleFSDirectory.SimpleFSIndexOutput}.
+//    /// 
+//    /// <p/><b>NOTE</b>: NIOFSDirectory is not recommended on Windows because of a bug
+//    /// in how FileChannel.read is implemented in Sun's JRE.
+//    /// Inside of the implementation the position is apparently
+//    /// synchronized.  See <a
+//    /// href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6265734">here</a>
+//    /// for details.
+//    /// </summary>
+//    public class NIOFSDirectory:FSDirectory
+//    {
+               
+//        /// <summary>Create a new NIOFSDirectory for the named location.
+//        /// 
+//        /// </summary>
+//        /// <param name="path">the path of the directory
+//        /// </param>
+//        /// <param name="lockFactory">the lock factory to use, or null for the default.
+//        /// </param>
+//        /// <throws>  IOException </throws>
+//        [System.Obsolete("Use the constructor that takes a DirectoryInfo, this will be removed in the 3.0 release")]
+//        public NIOFSDirectory(System.IO.FileInfo path, LockFactory lockFactory):base(new System.IO.DirectoryInfo(path.FullName), lockFactory)
+//        {
+//        }
+
+//        /// <summary>Create a new NIOFSDirectory for the named location.
+//        /// 
+//        /// </summary>
+//        /// <param name="path">the path of the directory
+//        /// </param>
+//        /// <param name="lockFactory">the lock factory to use, or null for the default.
+//        /// </param>
+//        /// <throws>  IOException </throws>
+//        public NIOFSDirectory(System.IO.DirectoryInfo path, LockFactory lockFactory) : base(path, lockFactory)
+//        {
+//        }
+               
+//        /// <summary>Create a new NIOFSDirectory for the named location and the default lock factory.
+//        /// 
+//        /// </summary>
+//        /// <param name="path">the path of the directory
+//        /// </param>
+//        /// <throws>  IOException </throws>
+//        [System.Obsolete("Use the constructor that takes a DirectoryInfo, this will be removed in the 3.0 release")]
+//        public NIOFSDirectory(System.IO.FileInfo path):base(new System.IO.DirectoryInfo(path.FullName), null)
+//        {
+//        }
+
+//        /// <summary>Create a new NIOFSDirectory for the named location and the default lock factory.
+//        /// 
+//        /// </summary>
+//        /// <param name="path">the path of the directory
+//        /// </param>
+//        /// <throws>  IOException </throws>
+//        public NIOFSDirectory(System.IO.DirectoryInfo path) : base(path, null)
+//        {
+//        }
+               
+//        // back compatibility so FSDirectory can instantiate via reflection
+//        /// <deprecated> 
+//        /// </deprecated>
+//        [Obsolete]
+//        internal NIOFSDirectory()
+//        {
+//        }
+               
+//        /// <summary>Creates an IndexInput for the file with the given name. </summary>
+//        public override IndexInput OpenInput(System.String name, int bufferSize)
+//        {
+//            EnsureOpen();
+//            return new NIOFSIndexInput(new System.IO.FileInfo(System.IO.Path.Combine(GetFile().FullName, name)), bufferSize, GetReadChunkSize());
+//        }
+               
+//        /// <summary>Creates an IndexOutput for the file with the given name. </summary>
+//        public override IndexOutput CreateOutput(System.String name)
+//        {
+//            InitOutput(name);
+//            return new SimpleFSDirectory.SimpleFSIndexOutput(new System.IO.FileInfo(System.IO.Path.Combine(directory.FullName, name)));
+//        }
+               
+//        public /*protected internal*/ class NIOFSIndexInput:SimpleFSDirectory.SimpleFSIndexInput
+//        {
+                       
+//            private System.IO.MemoryStream byteBuf; // wraps the buffer for NIO
+                       
+//            private byte[] otherBuffer;
+//            private System.IO.MemoryStream otherByteBuf;
+                       
+//            internal System.IO.BinaryReader channel;
+                       
+//            /// <deprecated> Please use ctor taking chunkSize 
+//            /// </deprecated>
+//            [Obsolete("Please use ctor taking chunkSize")]
+//            public NIOFSIndexInput(System.IO.FileInfo path, int bufferSize):this(path, bufferSize, FSDirectory.DEFAULT_READ_CHUNK_SIZE)
+//            {
+//            }
+                       
+//            public NIOFSIndexInput(System.IO.FileInfo path, int bufferSize, int chunkSize):base(path, bufferSize, chunkSize)
+//            {
+//                channel = (System.IO.BinaryReader) file;
+//            }
+                       
+//            protected internal override void  NewBuffer(byte[] newBuffer)
+//            {
+//                base.NewBuffer(newBuffer);
+//                // {{Aroush-2.9}} byteBuf = ByteBuffer.wrap(newBuffer);
+//                System.Diagnostics.Debug.Fail("Port issue:", "byteBuf = ByteBuffer.wrap(newBuffer)"); // {{Aroush-2.9}}
+//            }
+                       
+//            public override void  Close()
+//            {
+//                if (!isClone && file.isOpen)
+//                {
+//                    // Close the channel & file
+//                    try
+//                    {
+//                        channel.Close();
+//                    }
+//                    finally
+//                    {
+//                        file.Close();
+//                    }
+//                }
+//            }
+                       
+//            public override void  ReadInternal(byte[] b, int offset, int len)
+//            {
+                               
+//                System.IO.MemoryStream bb;
+                               
+//                // Determine the ByteBuffer we should use
+//                if (b == buffer && 0 == offset)
+//                {
+//                    // Use our own pre-wrapped byteBuf:
+//                    System.Diagnostics.Debug.Assert(byteBuf != null);
+//                    byteBuf.Position = 0;
+//                    byteBuf.Capacity = len;
+//                    bb = byteBuf;
+//                }
+//                else
+//                {
+//                    if (offset == 0)
+//                    {
+//                        if (otherBuffer != b)
+//                        {
+//                            // Now wrap this other buffer; with compound
+//                            // file, we are repeatedly called with its
+//                            // buffer, so we wrap it once and then re-use it
+//                            // on subsequent calls
+//                            otherBuffer = b;
+//                            // otherByteBuf = ByteBuffer.wrap(b); {{Aroush-2.9}}
+//                            System.Diagnostics.Debug.Fail("Port issue:", "otherByteBuf = ByteBuffer.wrap(b)"); // {{Aroush-2.9}}
+//                        }
+//                        else
+//                            otherByteBuf.Position = 0;
+//                        otherByteBuf.Capacity = len;
+//                        bb = otherByteBuf;
+//                    }
+//                    else
+//                    {
+//                        // Always wrap when offset != 0
+//                        bb = null; // bb = ByteBuffer.wrap(b, offset, len); {{Aroush-2.9}}
+//                        System.Diagnostics.Debug.Fail("Port issue:", "bb = ByteBuffer.wrap(b, offset, len)"); // {{Aroush-2.9}}
+//                    }
+//                }
+                               
+//                int readOffset = (int) bb.Position;
+//                int readLength = bb.Capacity - readOffset;
+//                System.Diagnostics.Debug.Assert(readLength == len);
+                               
+//                long pos = GetFilePointer();
+                               
+//                try
+//                {
+//                    while (readLength > 0)
+//                    {
+//                        int limit;
+//                        if (readLength > chunkSize)
+//                        {
+//                            // LUCENE-1566 - work around JVM Bug by breaking
+//                            // very large reads into chunks
+//                            limit = readOffset + chunkSize;
+//                        }
+//                        else
+//                        {
+//                            limit = readOffset + readLength;
+//                        }
+//                        bb.Capacity = limit;
+//                        int i = -1; // int i = channel.Read(bb, pos, limit); // {{Aroush-2.9}} must read from 'channel' into 'bb'
+//                        System.Diagnostics.Debug.Fail("Port issue:", "channel.Read(bb, pos, limit)"); // {{Aroush-2.9}}
+//                        if (i == - 1)
+//                        {
+//                            throw new System.IO.IOException("read past EOF");
+//                        }
+//                        pos += i;
+//                        readOffset += i;
+//                        readLength -= i;
+//                    }
+//                }
+//                catch (System.OutOfMemoryException e)
+//                {
+//                    // propagate OOM up and add a hint for 32bit VM Users hitting the bug
+//                    // with a large chunk size in the fast path.
+//                    System.OutOfMemoryException outOfMemoryError = new System.OutOfMemoryException("OutOfMemoryError likely caused by the Sun VM Bug described in " + "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize " + "with a value smaller than the current chunk size (" + chunkSize + ")", e);
+//                    throw outOfMemoryError;
+//                }
+//            }
+//        }
+//    }
+//}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/NativeFSLockFactory.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/NativeFSLockFactory.cs
new file mode 100644 (file)
index 0000000..1fd9c3c
--- /dev/null
@@ -0,0 +1,431 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> <p/>Implements {@link LockFactory} using native OS file
+       /// locks.  Note that because this LockFactory relies on
+       /// java.nio.* APIs for locking, any problems with those APIs
+       /// will cause locking to fail.  Specifically, on certain NFS
+       /// environments the java.nio.* locks will fail (the lock can
+       /// incorrectly be double acquired) whereas {@link
+       /// SimpleFSLockFactory} worked perfectly in those same
+       /// environments.  For NFS based access to an index, it's
+       /// recommended that you try {@link SimpleFSLockFactory}
+       /// first and work around the one limitation that a lock file
+       /// could be left when the JVM exits abnormally.<p/>
+       /// 
+       /// <p/>The primary benefit of {@link NativeFSLockFactory} is
+       /// that lock files will be properly removed (by the OS) if
+       /// the JVM has an abnormal exit.<p/>
+       /// 
+       /// <p/>Note that, unlike {@link SimpleFSLockFactory}, the existence of
+       /// leftover lock files in the filesystem on exiting the JVM
+       /// is fine because the OS will free the locks held against
+       /// these files even though the files still remain.<p/>
+       /// 
+       /// <p/>If you suspect that this or any other LockFactory is
+       /// not working properly in your environment, you can easily
+       /// test it by using {@link VerifyingLockFactory}, {@link
+       /// LockVerifyServer} and {@link LockStressTest}.<p/>
+       /// 
+       /// </summary>
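+       /// <example>A hedged wiring sketch; the path is illustrative:
+       /// <code>
+       /// var factory = new NativeFSLockFactory("/tmp/index");
+       /// var dir = new MMapDirectory(new System.IO.DirectoryInfo("/tmp/index"), factory);
+       /// Lock writeLock = factory.MakeLock("write.lock");
+       /// if (writeLock.Obtain())
+       /// {
+       ///     try { /* modify the index */ } finally { writeLock.Release(); }
+       /// }
+       /// </code>
+       /// </example>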
+       /// <seealso cref="LockFactory">
+       /// </seealso>
+       
+       public class NativeFSLockFactory:FSLockFactory
+       {
+               /// <summary> Create a NativeFSLockFactory instance, with null (unset)
+               /// lock directory. When you pass this factory to a {@link FSDirectory}
+               /// subclass, the lock directory is automatically set to the
+               /// directory itself. Be sure to create one instance for each directory
+               /// you create!
+               /// </summary>
+               public NativeFSLockFactory():this((System.IO.DirectoryInfo) null)
+               {
+               }
+               
+               /// <summary> Create a NativeFSLockFactory instance, storing lock
+               /// files into the specified lockDirName:
+               /// 
+               /// </summary>
+               /// <param name="lockDirName">where lock files are created.
+               /// </param>
+               public NativeFSLockFactory(System.String lockDirName):this(new System.IO.DirectoryInfo(lockDirName))
+               {
+               }
+               
+               /// <summary> Create a NativeFSLockFactory instance, storing lock
+               /// files into the specified lockDir:
+               /// 
+               /// </summary>
+               /// <param name="lockDir">where lock files are created.
+               /// </param>
+               [System.Obsolete("Use the constructor that takes a DirectoryInfo, this will be removed in the 3.0 release")]
+               public NativeFSLockFactory(System.IO.FileInfo lockDir) : this(new System.IO.DirectoryInfo(lockDir.FullName))
+               {
+               }
+               
+        /// <summary> Create a NativeFSLockFactory instance, storing lock
+        /// files into the specified lockDir:
+        /// 
+        /// </summary>
+        /// <param name="lockDir">where lock files are created.
+        /// </param>
+        public NativeFSLockFactory(System.IO.DirectoryInfo lockDir)
+        {
+            SetLockDir(lockDir);
+        }
+               
+               public override Lock MakeLock(System.String lockName)
+               {
+                       lock (this)
+                       {
+                               if (lockPrefix != null)
+                                       lockName = lockPrefix + "-" + lockName;
+                               return new NativeFSLock(lockDir, lockName);
+                       }
+               }
+               
+               public override void  ClearLock(System.String lockName)
+               {
+                       // Note that this isn't strictly required anymore
+                       // because the existence of these files does not mean
+                       // they are locked, but, still do this in case people
+                       // really want to see the files go away:
+                       bool lockDirExists = System.IO.File.Exists(lockDir.FullName) || System.IO.Directory.Exists(lockDir.FullName);
+                       if (lockDirExists)
+                       {
+                               if (lockPrefix != null)
+                               {
+                                       lockName = lockPrefix + "-" + lockName;
+                               }
+                               System.IO.FileInfo lockFile = new System.IO.FileInfo(System.IO.Path.Combine(lockDir.FullName, lockName));
+                               bool lockFileExists = System.IO.File.Exists(lockFile.FullName) || System.IO.Directory.Exists(lockFile.FullName);
+                               bool deleted;
+                               if (System.IO.File.Exists(lockFile.FullName))
+                               {
+                                       System.IO.File.Delete(lockFile.FullName);
+                                       deleted = true;
+                               }
+                               else if (System.IO.Directory.Exists(lockFile.FullName))
+                               {
+                                       System.IO.Directory.Delete(lockFile.FullName);
+                                       deleted = true;
+                               }
+                               else
+                               {
+                                       deleted = false;
+                               }
+                               if (lockFileExists && !deleted)
+                               {
+                                       throw new System.IO.IOException("Cannot delete " + lockFile);
+                               }
+                       }
+               }
+       }
+       
+       
+       class NativeFSLock:Lock
+       {
+               
+               private System.IO.FileStream f;
+               private System.IO.FileStream channel;
+               private bool lock_Renamed;
+               private System.IO.FileInfo path;
+               private System.IO.DirectoryInfo lockDir;
+               
+               /*
+               * The javadocs for FileChannel state that you should have
+               * a single instance of a FileChannel (per JVM) for all
+               * locking against a given file.  To ensure this, we have
+               * a single (static) HashSet that contains the file paths
+               * of all currently locked locks.  This protects against
+               * possible cases where different Directory instances in
+               * one JVM (each with their own NativeFSLockFactory
+               * instance) have set the same lock dir and lock prefix.
+               */
+               private static System.Collections.Hashtable LOCK_HELD = new System.Collections.Hashtable();
+
+               [System.Obsolete("Use the constructor that takes a DirectoryInfo, this will be removed in the 3.0 release")]
+               public NativeFSLock(System.IO.FileInfo lockDir, System.String lockFileName):this(new System.IO.DirectoryInfo(lockDir.FullName), lockFileName)
+               {
+               }
+               
+        public NativeFSLock(System.IO.DirectoryInfo lockDir, System.String lockFileName)
+        {
+            this.lockDir = lockDir;
+            path = new System.IO.FileInfo(System.IO.Path.Combine(lockDir.FullName, lockFileName));
+        }
+               
+               private bool LockExists()
+               {
+                       lock (this)
+                       {
+                               return lock_Renamed;
+                       }
+               }
+               
+               public override bool Obtain()
+               {
+                       lock (this)
+                       {
+                               
+                               if (LockExists())
+                               {
+                                       // Our instance is already locked:
+                                       return false;
+                               }
+                               
+                               // Ensure that lockDir exists and is a directory.
+                               bool lockDirExists = System.IO.File.Exists(lockDir.FullName) || System.IO.Directory.Exists(lockDir.FullName);
+                               if (!lockDirExists)
+                               {
+                                       try
+                    {
+                        System.IO.Directory.CreateDirectory(lockDir.FullName);
+                    }
+                    catch
+                    {
+                                               throw new System.IO.IOException("Cannot create directory: " + lockDir.FullName);
+                    }
+                               }
+                               else if (!System.IO.Directory.Exists(lockDir.FullName))
+                               {
+                                       throw new System.IO.IOException("Found regular file where directory expected: " + lockDir.FullName);
+                               }
+                               
+                               System.String canonicalPath = path.FullName;
+                               
+                               bool markedHeld = false;
+                               
+                               try
+                               {
+                                       
+                                       // Make sure nobody else in-process has this lock held
+                                       // already, and, mark it held if not:
+                                       
+                                       lock (LOCK_HELD)
+                                       {
+                                               if (LOCK_HELD.Contains(canonicalPath))
+                                               {
+                                                       // Someone else in this JVM already has the lock:
+                                                       return false;
+                                               }
+                                               else
+                                               {
+                                                       // This "reserves" the fact that we are the one
+                                                       // thread trying to obtain this lock, so we own
+                                                       // the only instance of a channel against this
+                                                       // file:
+                            LOCK_HELD.Add(canonicalPath, canonicalPath);
+                                                       markedHeld = true;
+                                               }
+                                       }
+                                       
+                                       try
+                                       {
+                                               f = new System.IO.FileStream(path.FullName, System.IO.FileMode.OpenOrCreate, System.IO.FileAccess.ReadWrite); 
+                                       }
+                                       catch (System.IO.IOException e)
+                                       {
+                                               // On Windows, we can get intermittent "Access
+                                               // Denied" here.  So, we treat this as failure to
+                                               // acquire the lock, but, store the reason in case
+                                               // there is in fact a real error case.
+                                               failureReason = e;
+                                               f = null;
+                                       }
+                                       
+                                       if (f != null)
+                                       {
+                                               try
+                                               {
+                                                       channel = f;
+                            lock_Renamed = false;
+                                                       try
+                                                       {
+                                                               channel.Lock(0, channel.Length);
+                                lock_Renamed = true;
+                                                       }
+                                                       catch (System.IO.IOException e)
+                                                       {
+                                                               // At least on OS X, we will sometimes get an
+                                                               // intermittent "Permission Denied" IOException,
+                                                               // which seems to simply mean "you failed to get
+                                                               // the lock".  But other IOExceptions could be
+                                                               // "permanent" (eg, locking is not supported via
+                                                               // the filesystem).  So, we record the failure
+                                                               // reason here; the timeout obtain (usually the
+                                                               // one calling us) will use this as "root cause"
+                                                               // if it fails to get the lock.
+                                                               failureReason = e;
+                                                       }
+                                                       finally
+                                                       {
+                                                               if (lock_Renamed == false)
+                                                               {
+                                                                       try
+                                                                       {
+                                                                               channel.Close();
+                                                                       }
+                                                                       finally
+                                                                       {
+                                                                               channel = null;
+                                                                       }
+                                                               }
+                                                       }
+                                               }
+                                               finally
+                                               {
+                                                       if (channel == null)
+                                                       {
+                                                               try
+                                                               {
+                                                                       f.Close();
+                                                               }
+                                                               finally
+                                                               {
+                                                                       f = null;
+                                                               }
+                                                       }
+                                               }
+                                       }
+                               }
+                               finally
+                               {
+                                       if (markedHeld && !LockExists())
+                                       {
+                                               lock (LOCK_HELD)
+                                               {
+                                                       if (LOCK_HELD.Contains(canonicalPath))
+                                                       {
+                                                               LOCK_HELD.Remove(canonicalPath);
+                                                       }
+                                               }
+                                       }
+                               }
+                               return LockExists();
+                       }
+               }
+               
+               public override void  Release()
+               {
+                       lock (this)
+                       {
+                               if (LockExists())
+                               {
+                                       try
+                                       {
+                                               channel.Unlock(0, channel.Length);
+                                       }
+                                       finally
+                                       {
+                                               lock_Renamed = false;
+                                               try
+                                               {
+                                                       channel.Close();
+                                               }
+                                               finally
+                                               {
+                                                       channel = null;
+                                                       try
+                                                       {
+                                                               f.Close();
+                                                       }
+                                                       finally
+                                                       {
+                                                               f = null;
+                                                               lock (LOCK_HELD)
+                                                               {
+                                                                       LOCK_HELD.Remove(path.FullName);
+                                                               }
+                                                       }
+                                               }
+                                       }
+                                       // Delete the lock file (or directory); fail if nothing was deleted.
+                                       if (System.IO.File.Exists(path.FullName))
+                                               System.IO.File.Delete(path.FullName);
+                                       else if (System.IO.Directory.Exists(path.FullName))
+                                               System.IO.Directory.Delete(path.FullName);
+                                       else
+                                               throw new LockReleaseFailedException("failed to delete " + path);
+                               }
+                       }
+               }
+               
+               public override bool IsLocked()
+               {
+                       lock (this)
+                       {
+                               // A direct test for isLocked is not possible with native file locks:
+                               
+                               // First, a shortcut: if this instance already holds a lock reference
+                               if (LockExists())
+                                       return true;
+                               
+                               // Check whether the lock file is present; if not, there can definitely be no lock.
+                               if (!System.IO.File.Exists(path.FullName) && !System.IO.Directory.Exists(path.FullName))
+                                       return false;
+                               
+                               // Try to obtain the lock; if that succeeds the lock was free, so release it again.
+                               try
+                               {
+                                       bool obtained = Obtain();
+                                       if (obtained)
+                                               Release();
+                                       return !obtained;
+                               }
+                               catch (System.IO.IOException)
+                               {
+                                       return false;
+                               }
+                       }
+               }
+               
+               public override System.String ToString()
+               {
+                       return "NativeFSLock@" + path;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/NoLockFactory.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/NoLockFactory.cs
new file mode 100644 (file)
index 0000000..88d0230
--- /dev/null
@@ -0,0 +1,77 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> Use this {@link LockFactory} to disable locking entirely.
+       /// This LockFactory is used when you call {@link FSDirectory#setDisableLocks}.
+       /// Only one instance of this lock is created.  You should call {@link
+       /// #GetNoLockFactory()} to get the instance.
+       /// 
+       /// </summary>
+       /// <seealso cref="LockFactory">
+       /// </seealso>
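+       /// <example>
+       /// Illustrative sketch (added for these docs, not part of the original source):
+       /// <code>
+       /// NoLockFactory factory = NoLockFactory.GetNoLockFactory();
+       /// Lock l = factory.MakeLock("write.lock");
+       /// bool obtained = l.Obtain();   // always true: locking is disabled
+       /// l.Release();                  // no-op
+       /// </code>
+       /// </example>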
+       
+       public class NoLockFactory:LockFactory
+       {
+               
+               // Single instance returned whenever MakeLock is called.
+               private static NoLock singletonLock = new NoLock();
+               private static NoLockFactory singleton = new NoLockFactory();
+               
+               public static NoLockFactory GetNoLockFactory()
+               {
+                       return singleton;
+               }
+               
+               public override Lock MakeLock(System.String lockName)
+               {
+                       return singletonLock;
+               }
+               
+               public override void  ClearLock(System.String lockName)
+               {
+               }
+               
+       }
+       
+       
+       class NoLock:Lock
+       {
+               public override bool Obtain()
+               {
+                       return true;
+               }
+               
+               public override void  Release()
+               {
+               }
+               
+               public override bool IsLocked()
+               {
+                       return false;
+               }
+               
+               public override System.String ToString()
+               {
+                       return "NoLock";
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/NoSuchDirectoryException.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/NoSuchDirectoryException.cs
new file mode 100644 (file)
index 0000000..115a9d1
--- /dev/null
@@ -0,0 +1,34 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> This exception is thrown when you try to list a
+       /// non-existent directory.
+       /// </summary>
+       
+       [Serializable]
+       public class NoSuchDirectoryException:System.IO.FileNotFoundException
+       {
+               public NoSuchDirectoryException(System.String message):base(message)
+               {
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/Package.html b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/Package.html
new file mode 100644 (file)
index 0000000..838f2d6
--- /dev/null
@@ -0,0 +1,25 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">\r
+<!--\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements.  See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License.  You may obtain a copy of the License at\r
+\r
+     http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+-->\r
+<html>\r
+<head>\r
+   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">\r
+</head>\r
+<body>\r
+Binary I/O API, used for all index data.\r
+</body>\r
+</html>\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/RAMDirectory.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/RAMDirectory.cs
new file mode 100644 (file)
index 0000000..b4755ea
--- /dev/null
@@ -0,0 +1,331 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> A memory-resident {@link Directory} implementation.  Locking
+       /// implementation is by default the {@link SingleInstanceLockFactory}
+       /// but can be changed with {@link #setLockFactory}.
+       /// 
+       /// </summary>
+       /// <version>  $Id: RAMDirectory.java 781333 2009-06-03 10:38:57Z mikemccand $
+       /// </version>
+       [Serializable]
+       public class RAMDirectory:Directory
+       {
+               
+               private const long serialVersionUID = 1L;
+               
+               internal protected System.Collections.Hashtable fileMap = new System.Collections.Hashtable();
+               internal protected long sizeInBytes = 0;
+               
+               // *****
+               // Lock acquisition sequence:  RAMDirectory, then RAMFile
+               // *****
+               
+               /// <summary>Constructs an empty {@link Directory}. </summary>
+               public RAMDirectory()
+               {
+                       SetLockFactory(new SingleInstanceLockFactory());
+               }
+               
+               /// <summary> Creates a new <code>RAMDirectory</code> instance from a different
+               /// <code>Directory</code> implementation.  This can be used to load
+               /// a disk-based index into memory.
+               /// <p/>
+               /// This should be used only with indices that can fit into memory.
+               /// <p/>
+               /// Note that the resulting <code>RAMDirectory</code> instance is fully
+               /// independent from the original <code>Directory</code> (it is a
+               /// complete copy).  Any subsequent changes to the
+               /// original <code>Directory</code> will not be visible in the
+               /// <code>RAMDirectory</code> instance.
+               /// 
+               /// </summary>
+               /// <param name="dir">a <code>Directory</code> value
+               /// </param>
+               /// <exception cref="IOException">if an error occurs
+               /// </exception>
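+               /// <example>
+               /// Illustrative sketch (added for these docs; "/path/to/index" is a placeholder):
+               /// <code>
+               /// Directory diskDir = FSDirectory.GetDirectory("/path/to/index");
+               /// RAMDirectory ramDir = new RAMDirectory(diskDir);   // independent in-memory copy
+               /// </code>
+               /// </example>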
+               public RAMDirectory(Directory dir):this(dir, false)
+               {
+               }
+               
+               private RAMDirectory(Directory dir, bool closeDir):this()
+               {
+                       Directory.Copy(dir, this, closeDir);
+               }
+               
+               /// <summary> Creates a new <code>RAMDirectory</code> instance from the {@link FSDirectory}.
+               /// 
+               /// </summary>
+               /// <param name="dir">a <code>File</code> specifying the index directory
+               /// 
+               /// </param>
+               /// <seealso cref="RAMDirectory(Directory)">
+               /// </seealso>
+               /// <deprecated> Use {@link #RAMDirectory(Directory)} instead
+               /// </deprecated>
+        [Obsolete("Use RAMDirectory(Directory) instead")]
+               public RAMDirectory(System.IO.FileInfo dir):this(FSDirectory.GetDirectory(dir), true)
+               {
+               }
+               
+               /// <summary> Creates a new <code>RAMDirectory</code> instance from the {@link FSDirectory}.
+               /// 
+               /// </summary>
+               /// <param name="dir">a <code>String</code> specifying the full index directory path
+               /// 
+               /// </param>
+               /// <seealso cref="RAMDirectory(Directory)">
+               /// </seealso>
+               /// <deprecated> Use {@link #RAMDirectory(Directory)} instead
+               /// </deprecated>
+        [Obsolete("Use RAMDirectory(Directory) instead")]
+               public RAMDirectory(System.String dir):this(FSDirectory.GetDirectory(dir), true)
+               {
+               }
+
+        // https://issues.apache.org/jira/browse/LUCENENET-174
+        [System.Runtime.Serialization.OnDeserialized]
+        void OnDeserialized(System.Runtime.Serialization.StreamingContext context)
+        {
+            if (lockFactory == null)
+            {
+                SetLockFactory(new SingleInstanceLockFactory());
+            }
+        }
+
+        [Obsolete("Mono.Lucene.Net-2.9.1. This method overrides obsolete member Mono.Lucene.Net.Store.Directory.List()")]
+               public override System.String[] List()
+               {
+                       lock (this)
+                       {
+                               return ListAll();
+                       }
+               }
+               
+               public override System.String[] ListAll()
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               System.Collections.ICollection fileNames = fileMap.Keys;
+                               System.String[] result = new System.String[fileNames.Count];
+                               int i = 0;
+                               System.Collections.IEnumerator it = fileNames.GetEnumerator();
+                               while (it.MoveNext())
+                               {
+                                       result[i++] = ((System.String) it.Current);
+                               }
+                               return result;
+                       }
+               }
+               
+               /// <summary>Returns true iff the named file exists in this directory. </summary>
+               public override bool FileExists(System.String name)
+               {
+                       EnsureOpen();
+                       RAMFile file;
+                       lock (this)
+                       {
+                               file = (RAMFile) fileMap[name];
+                       }
+                       return file != null;
+               }
+               
+               /// <summary>Returns the time the named file was last modified.</summary>
+               /// <throws>  IOException if the file does not exist </throws>
+               public override long FileModified(System.String name)
+               {
+                       EnsureOpen();
+                       RAMFile file;
+                       lock (this)
+                       {
+                               file = (RAMFile) fileMap[name];
+                       }
+                       if (file == null)
+                               throw new System.IO.FileNotFoundException(name);
+                       return file.GetLastModified();
+               }
+               
+               /// <summary>Set the modified time of an existing file to now.</summary>
+               /// <throws>  IOException if the file does not exist </throws>
+               public override void  TouchFile(System.String name)
+               {
+                       EnsureOpen();
+                       RAMFile file;
+                       lock (this)
+                       {
+                               file = (RAMFile) fileMap[name];
+                       }
+                       if (file == null)
+                               throw new System.IO.FileNotFoundException(name);
+                       
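+                       // Busy-wait until the tick count changes so the timestamp stored
+                       // below is guaranteed to differ from the previous one.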
+                       long ts2, ts1 = System.DateTime.Now.Ticks;
+                       do 
+                       {
+                               try
+                               {
+                                       System.Threading.Thread.Sleep(new System.TimeSpan(100)); // 100 ticks = 0.01 ms; conversion of Java's Thread.sleep(0, 1)
+                               }
+                               catch (System.Threading.ThreadInterruptedException ie)
+                               {
+                                       // In 3.0 we will change this to throw
+                                       // InterruptedException instead
+                                       SupportClass.ThreadClass.Current().Interrupt();
+                                       throw new System.SystemException(ie.Message, ie);
+                               }
+                               ts2 = System.DateTime.Now.Ticks;
+                       }
+                       while (ts1 == ts2);
+                       
+                       file.SetLastModified(ts2);
+               }
+               
+               /// <summary>Returns the length in bytes of a file in the directory.</summary>
+               /// <throws>  IOException if the file does not exist </throws>
+               public override long FileLength(System.String name)
+               {
+                       EnsureOpen();
+                       RAMFile file;
+                       lock (this)
+                       {
+                               file = (RAMFile) fileMap[name];
+                       }
+                       if (file == null)
+                               throw new System.IO.FileNotFoundException(name);
+                       return file.GetLength();
+               }
+               
+               /// <summary>Returns the total size in bytes of all files in this
+               /// directory.  This is currently quantized to
+               /// RAMOutputStream.BUFFER_SIZE. 
+               /// </summary>
+               public long SizeInBytes()
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               return sizeInBytes;
+                       }
+               }
+               
+               /// <summary>Removes an existing file in the directory.</summary>
+               /// <throws>  IOException if the file does not exist </throws>
+               public override void  DeleteFile(System.String name)
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               RAMFile file = (RAMFile) fileMap[name];
+                               if (file != null)
+                               {
+                                       fileMap.Remove(name);
+                                       file.directory = null;
+                                       sizeInBytes -= file.sizeInBytes; 
+                               }
+                               else
+                                       throw new System.IO.FileNotFoundException(name);
+                       }
+               }
+               
+               /// <summary>Renames an existing file in the directory.</summary>
+               /// <throws>  FileNotFoundException if from does not exist </throws>
+               /// <deprecated>
+               /// </deprecated>
+        [Obsolete]
+               public override void  RenameFile(System.String from, System.String to)
+               {
+                       lock (this)
+                       {
+                               EnsureOpen();
+                               RAMFile fromFile = (RAMFile) fileMap[from];
+                               if (fromFile == null)
+                                       throw new System.IO.FileNotFoundException(from);
+                               RAMFile toFile = (RAMFile) fileMap[to];
+                               if (toFile != null)
+                               {
+                                       sizeInBytes -= toFile.sizeInBytes; // updates to RAMFile.sizeInBytes synchronized on directory
+                                       toFile.directory = null;
+                               }
+                               fileMap.Remove(from);
+                               fileMap[to] = fromFile;
+                       }
+               }
+               
+               /// <summary>Creates a new, empty file in the directory with the given name. Returns a stream writing this file. </summary>
+               public override IndexOutput CreateOutput(System.String name)
+               {
+                       EnsureOpen();
+                       RAMFile file = new RAMFile(this);
+                       lock (this)
+                       {
+                               RAMFile existing = (RAMFile) fileMap[name];
+                               if (existing != null)
+                               {
+                                       sizeInBytes -= existing.sizeInBytes;
+                                       existing.directory = null;
+                               }
+                               fileMap[name] = file;
+                       }
+                       return new RAMOutputStream(file);
+               }
+               
+               /// <summary>Returns a stream reading an existing file. </summary>
+               public override IndexInput OpenInput(System.String name)
+               {
+                       EnsureOpen();
+                       RAMFile file;
+                       lock (this)
+                       {
+                               file = (RAMFile) fileMap[name];
+                       }
+                       if (file == null)
+                               throw new System.IO.FileNotFoundException(name);
+                       return new RAMInputStream(file);
+               }
+               
+               /// <summary>Closes the store to future operations, releasing associated memory. </summary>
+               public override void  Close()
+               {
+                       isOpen = false;
+                       fileMap = null;
+               }
+
+        /// <summary>
+        /// .NET-specific Dispose; simply delegates to Close().
+        /// </summary>
+        public override void Dispose()
+        {
+            Close();
+        }
+
+        public System.Collections.Hashtable fileMap_ForNUnit
+        {
+            get { return fileMap; }
+        }
+
+        public long sizeInBytes_ForNUnitTest
+        {
+            get { return sizeInBytes; }
+            set { sizeInBytes = value; }
+        }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/RAMFile.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/RAMFile.cs
new file mode 100644 (file)
index 0000000..fbe9be9
--- /dev/null
@@ -0,0 +1,154 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       [Serializable]
+       public class RAMFile
+       {
+               
+               private const long serialVersionUID = 1L;
+               
+               protected System.Collections.ArrayList buffers = new System.Collections.ArrayList();
+               internal long length;
+               internal RAMDirectory directory;
+               internal long sizeInBytes; 
+               
+               // This is publicly modifiable via Directory.TouchFile(), so direct access is not supported
+               private long lastModified = (DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond);
+               
+               // File used as a buffer; not attached to any RAMDirectory
+               public /*internal*/ RAMFile()
+               {
+               }
+               
+               public /*internal*/ RAMFile(RAMDirectory directory)
+               {
+                       this.directory = directory;
+               }
+               
+               // For non-stream access from thread that might be concurrent with writing
+               public /*internal*/ virtual long GetLength()
+               {
+                       lock (this)
+                       {
+                               return length;
+                       }
+               }
+               
+               public /*internal*/ virtual void  SetLength(long length)
+               {
+                       lock (this)
+                       {
+                               this.length = length;
+                       }
+               }
+               
+               // For non-stream access from thread that might be concurrent with writing
+               internal virtual long GetLastModified()
+               {
+                       lock (this)
+                       {
+                               return lastModified;
+                       }
+               }
+               
+               internal virtual void  SetLastModified(long lastModified)
+               {
+                       lock (this)
+                       {
+                               this.lastModified = lastModified;
+                       }
+               }
+               
+               internal byte[] AddBuffer(int size)
+               {
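+            // NewBuffer(size) is called outside the lock below, presumably to keep
+            // the allocation out of the critical section; only the bookkeeping
+            // (buffers list and sizeInBytes) is synchronized.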
+            byte[] buffer = NewBuffer(size);
+            lock (this)
+            {
+                buffers.Add(buffer);
+                sizeInBytes += size;
+            }
+
+            if (directory != null)
+            {
+                lock (directory) //{{DIGY}} what if directory becomes null in the meantime?
+                {
+                    directory.sizeInBytes += size;
+                }
+            }
+
+            return buffer;
+               }
+               
+               public /*internal*/ byte[] GetBuffer(int index)
+               {
+                       lock (this)
+                       {
+                               return (byte[]) buffers[index];
+                       }
+               }
+               
+               public /*internal*/ int NumBuffers()
+               {
+                       lock (this)
+                       {
+                               return buffers.Count;
+                       }
+               }
+               
+               /// <summary> Expert: allocate a new buffer. 
+               /// Subclasses can allocate differently. 
+               /// </summary>
+               /// <param name="size">size of allocated buffer.
+               /// </param>
+               /// <returns> allocated buffer.
+               /// </returns>
+               public /*internal*/ virtual byte[] NewBuffer(int size)
+               {
+                       return new byte[size];
+               }
+               
+               
+               public /*internal*/ virtual long GetSizeInBytes()
+               {
+            lock (this)
+            {
+                return sizeInBytes;
+            }
+               }
+
+        public long length_ForNUnit
+        {
+            get { return length; }
+        }
+
+        public RAMDirectory directory_ForNUnit
+        {
+            get { return directory; }
+            set { directory = value; }
+        }
+
+        public long sizeInBytes_ForNUnit
+        {
+            get { return sizeInBytes; }
+        }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/RAMInputStream.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/RAMInputStream.cs
new file mode 100644 (file)
index 0000000..3c4e1ae
--- /dev/null
@@ -0,0 +1,141 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> A memory-resident {@link IndexInput} implementation.
+       /// 
+       /// </summary>
+       /// <version>  $Id: RAMInputStream.java 632120 2008-02-28 21:13:59Z mikemccand $
+       /// </version>
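+       /// <example>
+       /// Illustrative sketch (added for these docs, not part of the original source):
+       /// <code>
+       /// RAMDirectory dir = new RAMDirectory();
+       /// IndexOutput output = dir.CreateOutput("file");
+       /// output.WriteByte((byte) 1);
+       /// output.Close();
+       /// IndexInput input = dir.OpenInput("file");   // a RAMInputStream
+       /// byte b = input.ReadByte();
+       /// input.Close();
+       /// </code>
+       /// </example>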
+       
+       public class RAMInputStream:IndexInput, System.ICloneable
+       {
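+               // Assigned in the static constructor at the bottom of this class;
+               // mirrors RAMOutputStream.BUFFER_SIZE.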
+               internal static readonly int BUFFER_SIZE;
+               
+               private RAMFile file;
+               private long length;
+               
+               private byte[] currentBuffer;
+               private int currentBufferIndex;
+               
+               private int bufferPosition;
+               private long bufferStart;
+               private int bufferLength;
+               
+               public /*internal*/ RAMInputStream(RAMFile f)
+               {
+                       file = f;
+                       length = file.length;
+                       if (length / BUFFER_SIZE >= System.Int32.MaxValue)
+                       {
+                               throw new System.IO.IOException("RAMFile too large: " + length);
+                       }
+                       
+                       // make sure that we switch to the
+                       // first needed buffer lazily
+                       currentBufferIndex = -1;
+                       currentBuffer = null;
+               }
+               
+               public override void  Close()
+               {
+                       // nothing to do here
+               }
+               
+               public override long Length()
+               {
+                       return length;
+               }
+               
+               public override byte ReadByte()
+               {
+                       if (bufferPosition >= bufferLength)
+                       {
+                               currentBufferIndex++;
+                               SwitchCurrentBuffer(true);
+                       }
+                       return currentBuffer[bufferPosition++];
+               }
+               
+               public override void  ReadBytes(byte[] b, int offset, int len)
+               {
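+                       // Copy across buffer boundaries: each pass copies what is left in the
+                       // current buffer, then advances to the next one.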
+                       while (len > 0)
+                       {
+                               if (bufferPosition >= bufferLength)
+                               {
+                                       currentBufferIndex++;
+                                       SwitchCurrentBuffer(true);
+                               }
+                               
+                               int remainInBuffer = bufferLength - bufferPosition;
+                               int bytesToCopy = len < remainInBuffer ? len : remainInBuffer;
+                               Array.Copy(currentBuffer, bufferPosition, b, offset, bytesToCopy);
+                               offset += bytesToCopy;
+                               len -= bytesToCopy;
+                               bufferPosition += bytesToCopy;
+                       }
+               }
+               
+               private void  SwitchCurrentBuffer(bool enforceEOF)
+               {
+                       if (currentBufferIndex >= file.NumBuffers())
+                       {
+                               // end of file reached, no more buffers left
+                               if (enforceEOF)
+                                       throw new System.IO.IOException("Read past EOF");
+                               else
+                               {
+                                       // Force EOF if a read takes place at this position
+                                       currentBufferIndex--;
+                                       bufferPosition = BUFFER_SIZE;
+                               }
+                       }
+                       else
+                       {
+                               currentBuffer = (byte[]) file.GetBuffer(currentBufferIndex);
+                               bufferPosition = 0;
+                               bufferStart = (long) BUFFER_SIZE * (long) currentBufferIndex;
+                               long buflen = length - bufferStart;
+                               bufferLength = buflen > BUFFER_SIZE ? BUFFER_SIZE : (int) buflen;
+                       }
+               }
+               
+               public override long GetFilePointer()
+               {
+                       return currentBufferIndex < 0 ? 0 : bufferStart + bufferPosition;
+               }
+               
+               public override void  Seek(long pos)
+               {
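+                       // Map the absolute position to a buffer index (pos / BUFFER_SIZE) and an
+                       // offset within it (pos % BUFFER_SIZE); switch buffers only when pos
+                       // falls outside the currently loaded one.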
+                       if (currentBuffer == null || pos < bufferStart || pos >= bufferStart + BUFFER_SIZE)
+                       {
+                               currentBufferIndex = (int) (pos / BUFFER_SIZE);
+                               SwitchCurrentBuffer(false);
+                       }
+                       bufferPosition = (int) (pos % BUFFER_SIZE);
+               }
+               
+               static RAMInputStream()
+               {
+                       BUFFER_SIZE = RAMOutputStream.BUFFER_SIZE;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/RAMOutputStream.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/RAMOutputStream.cs
new file mode 100644 (file)
index 0000000..889c427
--- /dev/null
@@ -0,0 +1,190 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> A memory-resident {@link IndexOutput} implementation.
+       /// 
+       /// </summary>
+       /// <version>  $Id: RAMOutputStream.java 691694 2008-09-03 17:34:29Z mikemccand $
+       /// </version>
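+       /// <example>
+       /// Illustrative sketch (added for these docs, not part of the original source):
+       /// <code>
+       /// RAMOutputStream ram = new RAMOutputStream();   // backed by a private RAMFile
+       /// ram.WriteByte((byte) 42);
+       /// ram.Flush();
+       /// long len = ram.Length();   // 1
+       /// ram.Close();
+       /// </code>
+       /// </example>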
+       
+       public class RAMOutputStream:IndexOutput
+       {
+               internal const int BUFFER_SIZE = 1024;
+               
+               private RAMFile file;
+               
+               private byte[] currentBuffer;
+               private int currentBufferIndex;
+               
+               private int bufferPosition;
+               private long bufferStart;
+               private int bufferLength;
+               
+               /// <summary>Construct an empty output buffer. </summary>
+               public RAMOutputStream():this(new RAMFile())
+               {
+               }
+               
+               public /*internal*/ RAMOutputStream(RAMFile f)
+               {
+                       file = f;
+                       
+                       // make sure that we switch to the
+                       // first needed buffer lazily
+                       currentBufferIndex = -1;
+                       currentBuffer = null;
+               }
+               
+               /// <summary>Copy the current contents of this buffer to the named output. </summary>
+               public virtual void  WriteTo(IndexOutput out_Renamed)
+               {
+                       Flush();
+                       long end = file.length;
+                       long pos = 0;
+                       int buffer = 0;
+                       while (pos < end)
+                       {
+                               int length = BUFFER_SIZE;
+                               long nextPos = pos + length;
+                               if (nextPos > end)
+                               {
+                                       // at the last buffer
+                                       length = (int) (end - pos);
+                               }
+                               out_Renamed.WriteBytes((byte[]) file.GetBuffer(buffer++), length);
+                               pos = nextPos;
+                       }
+               }
+               
+               /// <summary>Resets this to an empty buffer. </summary>
+               public virtual void  Reset()
+               {
+            currentBuffer = null;
+            currentBufferIndex = -1;
+            bufferPosition = 0;
+            bufferStart = 0;
+            bufferLength = 0;
+                       
+                       file.SetLength(0);
+               }
+               
+               public override void  Close()
+               {
+                       Flush();
+               }
+               
+               public override void  Seek(long pos)
+               {
+                       // set the file length in case we seek back
+                       // and flush() has not been called yet
+                       SetFileLength();
+                       if (pos < bufferStart || pos >= bufferStart + bufferLength)
+                       {
+                               currentBufferIndex = (int) (pos / BUFFER_SIZE);
+                               SwitchCurrentBuffer();
+                       }
+                       
+                       bufferPosition = (int) (pos % BUFFER_SIZE);
+               }
+               
+               public override long Length()
+               {
+                       return file.length;
+               }
+               
+               public override void  WriteByte(byte b)
+               {
+                       if (bufferPosition == bufferLength)
+                       {
+                               currentBufferIndex++;
+                               SwitchCurrentBuffer();
+                       }
+                       currentBuffer[bufferPosition++] = b;
+               }
+               
+               public override void  WriteBytes(byte[] b, int offset, int len)
+               {
+                       System.Diagnostics.Debug.Assert(b != null);
+                       while (len > 0)
+                       {
+                               if (bufferPosition == bufferLength)
+                               {
+                                       currentBufferIndex++;
+                                       SwitchCurrentBuffer();
+                               }
+                               
+                               int remainInBuffer = currentBuffer.Length - bufferPosition;
+                               int bytesToCopy = len < remainInBuffer ? len : remainInBuffer;
+                               Array.Copy(b, offset, currentBuffer, bufferPosition, bytesToCopy);
+                               offset += bytesToCopy;
+                               len -= bytesToCopy;
+                               bufferPosition += bytesToCopy;
+                       }
+               }
+               
+               private void  SwitchCurrentBuffer()
+               {
+                       if (currentBufferIndex == file.NumBuffers())
+                       {
+                               currentBuffer = file.AddBuffer(BUFFER_SIZE);
+                       }
+                       else
+                       {
+                               currentBuffer = (byte[]) file.GetBuffer(currentBufferIndex);
+                       }
+                       bufferPosition = 0;
+                       bufferStart = (long) BUFFER_SIZE * (long) currentBufferIndex;
+                       bufferLength = currentBuffer.Length;
+               }
+               
+               private void  SetFileLength()
+               {
+                       long pointer = bufferStart + bufferPosition;
+                       if (pointer > file.length)
+                       {
+                               file.SetLength(pointer);
+                       }
+               }
+               
+               public override void  Flush()
+               {
+                       file.SetLastModified((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond));
+                       SetFileLength();
+               }
+               
+               public override long GetFilePointer()
+               {
+                       return currentBufferIndex < 0 ? 0 : bufferStart + bufferPosition;
+               }
+               
+               /// <summary>Returns byte usage of all buffers. </summary>
+               public virtual long SizeInBytes()
+               {
+                       return file.NumBuffers() * BUFFER_SIZE;
+               }
+
+        public static int BUFFER_SIZE_ForNUnit
+        {
+            get { return BUFFER_SIZE; }
+        }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/SimpleFSDirectory.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/SimpleFSDirectory.cs
new file mode 100644 (file)
index 0000000..4b0c27e
--- /dev/null
@@ -0,0 +1,326 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary>A straightforward implementation of {@link FSDirectory}
+       /// using java.io.RandomAccessFile (System.IO.FileStream in this port).
+       /// However, this class has poor concurrent performance (multiple
+       /// threads will bottleneck) as it synchronizes when multiple threads
+       /// read from the same file.  It's usually better to use
+       /// {@link NIOFSDirectory} or {@link MMapDirectory} instead. 
+       /// </summary>
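+       /// <example>
+       /// Illustrative sketch (added for these docs; the path is a placeholder):
+       /// <code>
+       /// var dir = new SimpleFSDirectory(new System.IO.DirectoryInfo("/tmp/index"));
+       /// IndexOutput output = dir.CreateOutput("example.bin");
+       /// output.WriteByte((byte) 1);
+       /// output.Close();
+       /// </code>
+       /// </example>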
+       public class SimpleFSDirectory:FSDirectory
+       {
+               
+               /// <summary>Create a new SimpleFSDirectory for the named location.
+               /// 
+               /// </summary>
+               /// <param name="path">the path of the directory
+               /// </param>
+               /// <param name="lockFactory">the lock factory to use, or null for the default.
+               /// </param>
+               /// <throws>  IOException </throws>
+               [System.Obsolete("Use the constructor that takes a DirectoryInfo, this will be removed in the 3.0 release")]
+               public SimpleFSDirectory(System.IO.FileInfo path, LockFactory lockFactory):base(new System.IO.DirectoryInfo(path.FullName), lockFactory)
+               {
+               }
+               
+        /// <summary>Create a new SimpleFSDirectory for the named location.
+        /// 
+        /// </summary>
+        /// <param name="path">the path of the directory
+        /// </param>
+        /// <param name="lockFactory">the lock factory to use, or null for the default.
+        /// </param>
+        /// <throws>  IOException </throws>
+        public SimpleFSDirectory(System.IO.DirectoryInfo path, LockFactory lockFactory) : base(path, lockFactory)
+        {
+        }
+               
+               /// <summary>Create a new SimpleFSDirectory for the named location and the default lock factory.
+               /// 
+               /// </summary>
+               /// <param name="path">the path of the directory
+               /// </param>
+               /// <throws>  IOException </throws>
+        [System.Obsolete("Use the constructor that takes a DirectoryInfo, this will be removed in the 3.0 release")]
+               public SimpleFSDirectory(System.IO.FileInfo path):base(new System.IO.DirectoryInfo(path.FullName), null)
+               {
+               }
+               
+               // back compatibility so FSDirectory can instantiate via reflection
+               /// <deprecated> 
+               /// </deprecated>
+        [Obsolete]
+               internal SimpleFSDirectory()
+               {
+               }
+               
+        /// <summary>Create a new SimpleFSDirectory for the named location and the default lock factory.
+        /// 
+        /// </summary>
+        /// <param name="path">the path of the directory
+        /// </param>
+        /// <throws>  IOException </throws>
+        public SimpleFSDirectory(System.IO.DirectoryInfo path) : base(path, null)
+        {
+        }
+
+               /// <summary>Creates an IndexOutput for the file with the given name. </summary>
+               public override IndexOutput CreateOutput(System.String name)
+               {
+                       InitOutput(name);
+                       return new SimpleFSIndexOutput(new System.IO.FileInfo(System.IO.Path.Combine(directory.FullName, name)));
+               }
+               
+               /// <summary>Creates an IndexInput for the file with the given name. </summary>
+               public override IndexInput OpenInput(System.String name, int bufferSize)
+               {
+                       EnsureOpen();
+                       return new SimpleFSIndexInput(new System.IO.FileInfo(System.IO.Path.Combine(directory.FullName, name)), bufferSize, GetReadChunkSize());
+               }
+               
+               public /*protected internal*/class SimpleFSIndexInput:BufferedIndexInput, System.ICloneable
+               {
+                       
+                       protected internal class Descriptor:System.IO.BinaryReader
+                       {
+                               // remember if the file is open, so that we don't try to close it
+                               // more than once
+                               protected internal volatile bool isOpen;
+                               internal long position;
+                               internal long length;
+                               
+                               public Descriptor(/*FSIndexInput enclosingInstance,*/ System.IO.FileInfo file, System.IO.FileAccess mode) 
+                                       : base(new System.IO.FileStream(file.FullName, System.IO.FileMode.Open, mode, System.IO.FileShare.ReadWrite))
+                               {
+                                       isOpen = true;
+                                       length = file.Length;
+                               }
+                               
+                               public override void  Close()
+                               {
+                                       if (isOpen)
+                                       {
+                                               isOpen = false;
+                                               base.Close();
+                                       }
+                               }
+                       
+                               ~Descriptor()
+                               {
+                                       try
+                                       {
+                                               Close();
+                                       }
+                                       finally
+                                       {
+                                       }
+                               }
+                       }
+                       
+                       protected internal Descriptor file;
+                       internal bool isClone;
+                       //  LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM 
+                       protected internal int chunkSize;
+                       
+                       /// <deprecated> Please use ctor taking chunkSize 
+                       /// </deprecated>
+            [Obsolete("Please use ctor taking chunkSize ")]
+                       public SimpleFSIndexInput(System.IO.FileInfo path):this(path, BufferedIndexInput.BUFFER_SIZE, SimpleFSDirectory.DEFAULT_READ_CHUNK_SIZE)
+                       {
+                       }
+                       
+                       /// <deprecated> Please use ctor taking chunkSize 
+                       /// </deprecated>
+            [Obsolete("Please use ctor taking chunkSize ")]
+                       public SimpleFSIndexInput(System.IO.FileInfo path, int bufferSize):this(path, bufferSize, SimpleFSDirectory.DEFAULT_READ_CHUNK_SIZE)
+                       {
+                       }
+                       
+                       public SimpleFSIndexInput(System.IO.FileInfo path, int bufferSize, int chunkSize):base(bufferSize)
+                       {
+                               file = new Descriptor(path, System.IO.FileAccess.Read);
+                               this.chunkSize = chunkSize;
+                       }
+                       
+                       /// <summary>IndexInput methods </summary>
+                       public override void  ReadInternal(byte[] b, int offset, int len)
+                       {
+                               lock (file)
+                               {
+                                       long position = GetFilePointer();
+                                       if (position != file.position)
+                                       {
+                                               file.BaseStream.Seek(position, System.IO.SeekOrigin.Begin);
+                                               file.position = position;
+                                       }
+                                       int total = 0;
+                                       
+                                       try
+                                       {
+                                               do 
+                                               {
+                                                       int readLength;
+                                                       if (total + chunkSize > len)
+                                                       {
+                                                               readLength = len - total;
+                                                       }
+                                                       else
+                                                       {
+                                                               // LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
+                                                               readLength = chunkSize;
+                                                       }
+                                                       int i = file.Read(b, offset + total, readLength);
+                                                       if (i == -1)
+                                                       {
+                                                               throw new System.IO.IOException("read past EOF");
+                                                       }
+                                                       file.position += i;
+                                                       total += i;
+                                               }
+                                               while (total < len);
+                                       }
+                                       catch (System.OutOfMemoryException e)
+                                       {
+                                               // propagate the OOM up and add a hint for 32-bit VM users hitting the bug
+                                               // with a large chunk size in the fast path.
+                                               System.OutOfMemoryException outOfMemoryError = new System.OutOfMemoryException("OutOfMemoryError likely caused by the Sun VM bug described in " + "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize " + "with a value smaller than the current chunk size (" + chunkSize + ")", e);
+                                               throw outOfMemoryError;
+                                       }
+                               }
+                       }
+                       
+                       public override void  Close()
+                       {
+                               // only close the file if this is not a clone
+                               if (!isClone)
+                                       file.Close();
+                       }
+                       
+                       public override void  SeekInternal(long position)
+                       {
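+                               // Intentionally empty: ReadInternal compares GetFilePointer()
+                               // with file.position and seeks lazily before each read.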
+                       }
+                       
+                       public override long Length()
+                       {
+                               return file.length;
+                       }
+                       
+                       public override System.Object Clone()
+                       {
+                               SimpleFSIndexInput clone = (SimpleFSIndexInput) base.Clone();
+                               clone.isClone = true;
+                               return clone;
+                       }
+                       
+                       /// <summary>Method used for testing. Returns true if the underlying
+                       /// file descriptor is valid.
+                       /// </summary>
+                       public /*internal*/ virtual bool IsFDValid()
+                       {
+                               return file.BaseStream != null;
+                       }
+
+            public bool isClone_ForNUnit
+            {
+                get { return isClone; }
+            }
+               }
+               
+               /*protected internal*/ public class SimpleFSIndexOutput:BufferedIndexOutput
+               {
+                       internal System.IO.FileStream file = null;
+                       
+                       // remember if the file is open, so that we don't try to close it
+                       // more than once
+                       private volatile bool isOpen;
+                       
+                       public SimpleFSIndexOutput(System.IO.FileInfo path)
+                       {
+                               file = new System.IO.FileStream(path.FullName, System.IO.FileMode.OpenOrCreate, System.IO.FileAccess.ReadWrite);
+                               isOpen = true;
+                       }
+                       
+                       /// <summary>output methods: </summary>
+                       public override void  FlushBuffer(byte[] b, int offset, int size)
+                       {
+                               file.Write(b, offset, size);
+                // {{dougsale-2.4.0}}
+                // FSIndexOutput.Flush
+                // When writing frequently with small amounts of data, the data isn't flushed to disk.
+                // Thus, attempting to read the data soon after this method is invoked leads to
+                // BufferedIndexInput.Refill() throwing an IOException for reading past EOF.
+                // Test\Index\TestDoc.cs demonstrates such a situation.
+                // Forcing a flush here prevents said issue.
+                // {{DIGY 2.9.0}}
+                // This code is not available in Lucene.Java 2.9.X.
+                // Can there be a indexing-performance problem?
+                file.Flush();
+                       }
+                       public override void  Close()
+                       {
+                               // only close the file if it has not been closed yet
+                               if (isOpen)
+                               {
+                                       bool success = false;
+                                       try
+                                       {
+                                               base.Close();
+                                               success = true;
+                                       }
+                                       finally
+                                       {
+                                               isOpen = false;
+                                               if (!success)
+                                               {
+                                                       try
+                                                       {
+                                                               file.Close();
+                                                       }
+                                                       catch (System.Exception)
+                                                       {
+                                                               // Suppress so we don't mask original exception
+                                                       }
+                                               }
+                                               else
+                                                       file.Close();
+                                       }
+                               }
+                       }
+                       
+                       /// <summary>Random-access methods </summary>
+                       public override void  Seek(long pos)
+                       {
+                               base.Seek(pos);
+                               file.Seek(pos, System.IO.SeekOrigin.Begin);
+                       }
+                       public override long Length()
+                       {
+                               return file.Length;
+                       }
+                       public override void  SetLength(long length)
+                       {
+                               file.SetLength(length);
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/SimpleFSLockFactory.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/SimpleFSLockFactory.cs
new file mode 100644 (file)
index 0000000..0d49876
--- /dev/null
@@ -0,0 +1,242 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> <p/>Implements {@link LockFactory} using {@link
+       /// File#createNewFile()}.<p/>
+       /// 
+       /// <p/><b>NOTE:</b> the <a target="_top"
+       /// href="http://java.sun.com/j2se/1.4.2/docs/api/java/io/File.html#createNewFile()">javadocs
+       /// for <code>File.createNewFile</code></a> contain a vague
+       /// yet spooky warning about not using the API for file
+       /// locking.  This warning was added due to <a target="_top"
+       /// href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4676183">this
+       /// bug</a>, and in fact the only known problem with using
+       /// this API for locking is that the Lucene write lock may
+       /// not be released when the JVM exits abnormally.<p/>
+       /// <p/>When this happens, a {@link LockObtainFailedException}
+       /// is hit when trying to create a writer, in which case you
+       /// need to explicitly clear the lock file first.  You can
+       /// either manually remove the file, or use the {@link
+       /// org.apache.lucene.index.IndexReader#unlock(Directory)}
+       /// API.  But, first be certain that no writer is in fact
+       /// writing to the index otherwise you can easily corrupt
+       /// your index.<p/>
+       /// 
+       /// <p/>If you suspect that this or any other LockFactory is
+       /// not working properly in your environment, you can easily
+       /// test it by using {@link VerifyingLockFactory}, {@link
+       /// LockVerifyServer} and {@link LockStressTest}.<p/>
+       /// 
+       /// </summary>
+       /// <seealso cref="LockFactory">
+       /// </seealso>
+       
+       public class SimpleFSLockFactory:FSLockFactory
+       {
+               
+               /// <summary> Create a SimpleFSLockFactory instance, with null (unset)
+               /// lock directory. When you pass this factory to a {@link FSDirectory}
+               /// subclass, the lock directory is automatically set to the
+               /// directory itself. Be sure to create one instance for each directory
+               /// you create!
+               /// </summary>
+               public SimpleFSLockFactory():this((System.IO.DirectoryInfo) null)
+               {
+               }
+               
+               /// <summary> Instantiate using the provided directory (as a File instance).</summary>
+               /// <param name="lockDir">where lock files should be created.
+               /// </param>
+               [System.Obsolete("Use the constructor that takes a DirectoryInfo, this will be removed in the 3.0 release")]
+               public SimpleFSLockFactory(System.IO.FileInfo lockDir)
+               {
+                       SetLockDir(new System.IO.DirectoryInfo(lockDir.FullName));
+               }
+
+        /// <summary> Instantiate using the provided directory (as a File instance).</summary>
+        /// <param name="lockDir">where lock files should be created.
+        /// </param>
+        public SimpleFSLockFactory(System.IO.DirectoryInfo lockDir)
+        {
+            SetLockDir(lockDir);
+        }
+               
+               /// <summary> Instantiate using the provided directory name (String).</summary>
+               /// <param name="lockDirName">where lock files should be created.
+               /// </param>
+               public SimpleFSLockFactory(System.String lockDirName)
+               {
+                       lockDir = new System.IO.DirectoryInfo(lockDirName);
+                       SetLockDir(lockDir);
+               }
+               
+               public override Lock MakeLock(System.String lockName)
+               {
+                       if (lockPrefix != null)
+                       {
+                               lockName = lockPrefix + "-" + lockName;
+                       }
+                       return new SimpleFSLock(lockDir, lockName);
+               }
+               
+               public override void  ClearLock(System.String lockName)
+               {
+                       bool lockDirExists = System.IO.File.Exists(lockDir.FullName) || System.IO.Directory.Exists(lockDir.FullName);
+                       if (lockDirExists)
+                       {
+                               if (lockPrefix != null)
+                               {
+                                       lockName = lockPrefix + "-" + lockName;
+                               }
+                               System.IO.FileInfo lockFile = new System.IO.FileInfo(System.IO.Path.Combine(lockDir.FullName, lockName));
+                               bool lockFileExists = System.IO.File.Exists(lockFile.FullName) || System.IO.Directory.Exists(lockFile.FullName);
+                               bool deleted;
+                               if (System.IO.File.Exists(lockFile.FullName))
+                               {
+                                       System.IO.File.Delete(lockFile.FullName);
+                                       deleted = true;
+                               }
+                               else if (System.IO.Directory.Exists(lockFile.FullName))
+                               {
+                                       System.IO.Directory.Delete(lockFile.FullName);
+                                       deleted = true;
+                               }
+                               else
+                                       deleted = false;
+                               if (lockFileExists && !deleted)
+                               {
+                                       throw new System.IO.IOException("Cannot delete " + lockFile);
+                               }
+                       }
+               }
+       }
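+       // A minimal usage sketch of the factory above (illustrative only; the
+       // "index" directory and "write.lock" name are hypothetical):
+       //
+       //   LockFactory factory = new SimpleFSLockFactory("index");
+       //   Lock writeLock = factory.MakeLock("write.lock");
+       //   if (writeLock.Obtain())
+       //   {
+       //       try { /* ... modify the index ... */ }
+       //       finally { writeLock.Release(); }
+       //   }
+       //   // if Obtain() returns false and no writer is alive, the stale lock
+       //   // file can be removed with factory.ClearLock("write.lock").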
+       
+       
+       class SimpleFSLock:Lock
+       {
+               
+               internal System.IO.FileInfo lockFile;
+               internal System.IO.DirectoryInfo lockDir;
+
+               [System.Obsolete("Use the constructor that takes a DirectoryInfo, this will be removed in the 3.0 release")]
+               public SimpleFSLock(System.IO.FileInfo lockDir, System.String lockFileName) : this(new System.IO.DirectoryInfo(lockDir.FullName), lockFileName)
+               {
+               }
+
+        public SimpleFSLock(System.IO.DirectoryInfo lockDir, System.String lockFileName)
+        {
+            this.lockDir = new System.IO.DirectoryInfo(lockDir.FullName);
+            lockFile = new System.IO.FileInfo(System.IO.Path.Combine(lockDir.FullName, lockFileName));
+        }
+               
+               public override bool Obtain()
+               {
+                       
+                       // Ensure that lockDir exists and is a directory:
+                       bool lockDirExists = System.IO.File.Exists(lockDir.FullName) || System.IO.Directory.Exists(lockDir.FullName);
+                       if (!lockDirExists)
+                       {
+                               try
+                               {
+                                       System.IO.Directory.CreateDirectory(lockDir.FullName);
+                               }
+                               catch
+                               {
+                                       throw new System.IO.IOException("Cannot create directory: " + lockDir.FullName);
+                               }
+                       }
+                       else if (!System.IO.Directory.Exists(lockDir.FullName))
+                       {
+                               // the path exists but is a regular file, not a directory
+                               throw new System.IO.IOException("Found regular file where directory expected: " + lockDir.FullName);
+                       }
+
+                       if (lockFile.Exists)
+                       {
+                               return false;
+                       }
+                       else
+                       {
+                               System.IO.FileStream createdFile = lockFile.Create();
+                               createdFile.Close();
+                               return true;
+                       }
+               }
+               
+               public override void  Release()
+               {
+                       bool lockFileExists = System.IO.File.Exists(lockFile.FullName) || System.IO.Directory.Exists(lockFile.FullName);
+                       bool deleted;
+                       if (System.IO.File.Exists(lockFile.FullName))
+                       {
+                               System.IO.File.Delete(lockFile.FullName);
+                               deleted = true;
+                       }
+                       else if (System.IO.Directory.Exists(lockFile.FullName))
+                       {
+                               System.IO.Directory.Delete(lockFile.FullName);
+                               deleted = true;
+                       }
+                       else
+                               deleted = false;
+                       if (lockFileExists && !deleted)
+                               throw new LockReleaseFailedException("failed to delete " + lockFile);
+               }
+               
+               public override bool IsLocked()
+               {
+                       return System.IO.File.Exists(lockFile.FullName) || System.IO.Directory.Exists(lockFile.FullName);
+               }
+               
+               public override System.String ToString()
+               {
+                       return "SimpleFSLock@" + lockFile;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/SingleInstanceLockFactory.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/SingleInstanceLockFactory.cs
new file mode 100644 (file)
index 0000000..e4a6be8
--- /dev/null
@@ -0,0 +1,107 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> Implements {@link LockFactory} for a single in-process instance,
+       /// meaning all locking will take place through this one instance.
+       /// Only use this {@link LockFactory} when you are certain all
+       /// IndexReaders and IndexWriters for a given index are running
+       /// against a single shared in-process Directory instance.  This is
+       /// currently the default locking for RAMDirectory.
+       /// 
+       /// </summary>
+       /// <seealso cref="LockFactory">
+       /// </seealso>
+       
+       public class SingleInstanceLockFactory:LockFactory
+       {
+
+        private System.Collections.Hashtable locks = new System.Collections.Hashtable();
+               
+               public override Lock MakeLock(System.String lockName)
+               {
+                       // We do not use the LockPrefix at all, because the private
+                       // Hashtable instance effectively scopes the locking to this
+                       // single Directory instance.
+                       return new SingleInstanceLock(locks, lockName);
+               }
+               
+               public override void  ClearLock(System.String lockName)
+               {
+                       lock (locks)
+                       {
+                               if (locks.Contains(lockName))
+                               {
+                                       locks.Remove(lockName);
+                               }
+                       }
+               }
+       }
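+       // Because the Hashtable above is per-factory instance, locks are only
+       // scoped to that one factory. A small sketch (illustrative only):
+       //
+       //   LockFactory lf = new SingleInstanceLockFactory();
+       //   Lock a = lf.MakeLock("commit");
+       //   Lock b = lf.MakeLock("commit");
+       //   a.Obtain();   // true
+       //   b.Obtain();   // false - same name within the same factory
+       //   a.Release();
+       //   b.Obtain();   // true again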
+       
+       
+       class SingleInstanceLock:Lock
+       {
+               
+               internal System.String lockName;
+               private System.Collections.Hashtable locks;
+               
+               public SingleInstanceLock(System.Collections.Hashtable locks, System.String lockName)
+               {
+                       this.locks = locks;
+                       this.lockName = lockName;
+               }
+               
+               public override bool Obtain()
+               {
+                       lock (locks)
+                       {
+                if (locks.Contains(lockName) == false)
+                {
+                    locks.Add(lockName, lockName);
+                    return true;
+                }
+
+                return false;
+                       }
+               }
+               
+               public override void  Release()
+               {
+                       lock (locks)
+                       {
+                               locks.Remove(lockName);
+                       }
+               }
+               
+               public override bool IsLocked()
+               {
+                       lock (locks)
+                       {
+                               return locks.Contains(lockName);
+                       }
+               }
+               
+               public override System.String ToString()
+               {
+                       return base.ToString() + ": " + lockName;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/VerifyingLockFactory.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Store/VerifyingLockFactory.cs
new file mode 100644 (file)
index 0000000..8ef93e6
--- /dev/null
@@ -0,0 +1,165 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Store
+{
+       
+       /// <summary> A {@link LockFactory} that wraps another {@link
+       /// LockFactory} and verifies that each lock obtain/release
+       /// is "correct" (never results in two processes holding the
+       /// lock at the same time).  It does this by contacting an
+       /// external server ({@link LockVerifyServer}) to assert that
+       /// at most one process holds the lock at a time.  To use
+       /// this, you should also run {@link LockVerifyServer} on the
+       /// host &amp; port matching what you pass to the constructor.
+       /// 
+       /// </summary>
+       /// <seealso cref="LockVerifyServer">
+       /// </seealso>
+       /// <seealso cref="LockStressTest">
+       /// </seealso>
+       
+       public class VerifyingLockFactory:LockFactory
+       {
+               
+               internal LockFactory lf;
+               internal sbyte id;
+               internal System.String host;
+               internal int port;
+               
+               private class CheckedLock:Lock
+               {
+                       private void  InitBlock(VerifyingLockFactory enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private VerifyingLockFactory enclosingInstance;
+                       public VerifyingLockFactory Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private Lock lock_Renamed;
+                       
+                       public CheckedLock(VerifyingLockFactory enclosingInstance, Lock lock_Renamed)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.lock_Renamed = lock_Renamed;
+                       }
+                       
+                       private void  Verify(sbyte message)
+                       {
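+                               // Wire protocol (one round trip per event): send our client id
+                               // byte, then the event byte (1 = obtained, 0 = releasing); the
+                               // LockVerifyServer answers with a single byte, non-zero meaning
+                               // the lock was held by two clients at once.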
+                               try
+                               {
+                                       System.Net.Sockets.TcpClient s = new System.Net.Sockets.TcpClient(Enclosing_Instance.host, Enclosing_Instance.port);
+                                       System.IO.Stream out_Renamed = s.GetStream();
+                                       out_Renamed.WriteByte((byte) Enclosing_Instance.id);
+                                       out_Renamed.WriteByte((byte) message);
+                                       System.IO.Stream in_Renamed = s.GetStream();
+                                       int result = in_Renamed.ReadByte();
+                                       in_Renamed.Close();
+                                       out_Renamed.Close();
+                                       s.Close();
+                                       if (result != 0)
+                                               throw new System.SystemException("lock was double acquired");
+                               }
+                               catch (System.Exception e)
+                               {
+                                       throw new System.SystemException(e.Message, e);
+                               }
+                       }
+                       
+                       public override bool Obtain(long lockWaitTimeout)
+                       {
+                               lock (this)
+                               {
+                                       bool obtained = lock_Renamed.Obtain(lockWaitTimeout);
+                                       if (obtained)
+                                               Verify((sbyte) 1);
+                                       return obtained;
+                               }
+                       }
+                       
+                       public override bool Obtain()
+                       {
+                               lock (this)
+                               {
+                                       return lock_Renamed.Obtain();
+                               }
+                       }
+                       
+                       public override bool IsLocked()
+                       {
+                               lock (this)
+                               {
+                                       return lock_Renamed.IsLocked();
+                               }
+                       }
+                       
+                       public override void  Release()
+                       {
+                               lock (this)
+                               {
+                                       if (IsLocked())
+                                       {
+                                               Verify((sbyte) 0);
+                                               lock_Renamed.Release();
+                                       }
+                               }
+                       }
+               }
+               
+               /// <param name="id">should be a unique id across all clients
+               /// </param>
+               /// <param name="lf">the LockFactory that we are testing
+               /// </param>
+               /// <param name="host">host or IP where {@link LockVerifyServer}
+               /// is running
+               /// </param>
+               /// <param name="port">the port {@link LockVerifyServer} is
+               /// listening on
+               /// </param>
+               public VerifyingLockFactory(sbyte id, LockFactory lf, System.String host, int port)
+               {
+                       this.id = id;
+                       this.lf = lf;
+                       this.host = host;
+                       this.port = port;
+               }
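+               // Illustrative wiring only ("localhost" and port 9871 are hypothetical;
+               // a LockVerifyServer must be listening there):
+               //
+               //   LockFactory inner = new SimpleFSLockFactory("index");
+               //   LockFactory lf = new VerifyingLockFactory((sbyte) 1, inner, "localhost", 9871);
+               //   Lock l = lf.MakeLock("write.lock");
+               //   if (l.Obtain()) { try { /* ... */ } finally { l.Release(); } }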
+               
+               public override Lock MakeLock(System.String lockName)
+               {
+                       lock (this)
+                       {
+                               return new CheckedLock(this, lf.MakeLock(lockName));
+                       }
+               }
+               
+               public override void  ClearLock(System.String lockName)
+               {
+                       lock (this)
+                       {
+                               lf.ClearLock(lockName);
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/SupportClass.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/SupportClass.cs
new file mode 100644 (file)
index 0000000..14fda61
--- /dev/null
@@ -0,0 +1,2340 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using System.Collections;
+
+/// <summary>
+/// This interface should be implemented by any class whose instances are intended 
+/// to be executed by a thread.
+/// </summary>
+public interface IThreadRunnable
+{
+    /// <summary>
+/// Implementations of this method are invoked in the separately executing
+/// thread when that thread is started.
+    /// </summary>
+    void Run();
+}
+
+/// <summary>
+/// Contains conversion support elements such as classes, interfaces and static methods.
+/// </summary>
+public class SupportClass
+{
+    public interface Checksum
+    {
+        void Reset();
+        void Update(int b);
+        void Update(byte[] b);
+        void Update(byte[] b, int offset, int length);
+        Int64 GetValue();
+    }
+
+    public class CRC32 : Checksum
+    {
+        private static readonly UInt32[] crcTable = InitializeCRCTable();
+
+        private static UInt32[] InitializeCRCTable()
+        {
+            UInt32[] crcTable = new UInt32[256];
+            for (UInt32 n = 0; n < 256; n++)
+            {
+                UInt32 c = n;
+                for (int k = 8; --k >= 0; )
+                {
+                    if ((c & 1) != 0)
+                        c = 0xedb88320 ^ (c >> 1);
+                    else
+                        c = c >> 1;
+                }
+                crcTable[n] = c;
+            }
+            return crcTable;
+        }
+
+        private UInt32 crc = 0;
+
+        public Int64 GetValue()
+        {
+            return (Int64)crc & 0xffffffffL;
+        }
+
+        public void Reset()
+        {
+            crc = 0;
+        }
+
+        public void Update(int bval)
+        {
+            UInt32 c = ~crc;
+            c = crcTable[(c ^ bval) & 0xff] ^ (c >> 8);
+            crc = ~c;
+        }
+
+        public void Update(byte[] buf, int off, int len)
+        {
+            UInt32 c = ~crc;
+            while (--len >= 0)
+                c = crcTable[(c ^ buf[off++]) & 0xff] ^ (c >> 8);
+            crc = ~c;
+        }
+
+        public void Update(byte[] buf)
+        {
+            Update(buf, 0, buf.Length);
+        }
+    }
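+    // A quick check of the table-driven CRC32 above, which implements the
+    // standard reflected polynomial 0xEDB88320 ("123456789" is the customary
+    // test vector; illustrative only):
+    //
+    //   SupportClass.Checksum crc = new SupportClass.CRC32();
+    //   crc.Update(System.Text.Encoding.ASCII.GetBytes("123456789"));
+    //   // crc.GetValue() == 0xCBF43926L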
+
+    public class TextSupport
+    {
+        /// <summary>
+        /// Copies a range of chars from a String into a specified array of chars.
+        /// </summary>
+        /// <param name="sourceString">The String to get the chars from</param>
+        /// <param name="sourceStart">Position in the String to start getting the chars (inclusive)</param>
+        /// <param name="sourceEnd">Position in the String to stop getting the chars (exclusive)</param>
+        /// <param name="destinationArray">Array that receives the chars</param>
+        /// <param name="destinationStart">Position in the destination array to start storing the chars</param>
+        public static void GetCharsFromString(string sourceString, int sourceStart, int sourceEnd, char[] destinationArray, int destinationStart)
+        {
+            int sourceCounter;
+            int destinationCounter;
+            sourceCounter = sourceStart;
+            destinationCounter = destinationStart;
+            while (sourceCounter < sourceEnd)
+            {
+                destinationArray[destinationCounter] = (char)sourceString[sourceCounter];
+                sourceCounter++;
+                destinationCounter++;
+            }
+        }
+    }
+
+    /// <summary>
+    /// Support class used to handle threads
+    /// </summary>
+    public class ThreadClass : IThreadRunnable
+    {
+        /// <summary>
+        /// The instance of System.Threading.Thread
+        /// </summary>
+        private System.Threading.Thread threadField;
+
+
+        /// <summary>
+        /// Initializes a new instance of the ThreadClass class
+        /// </summary>
+        public ThreadClass()
+        {
+            threadField = new System.Threading.Thread(new System.Threading.ThreadStart(Run));
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the ThreadClass class.
+        /// </summary>
+        /// <param name="Name">The name of the thread</param>
+        public ThreadClass(System.String Name)
+        {
+            threadField = new System.Threading.Thread(new System.Threading.ThreadStart(Run));
+            this.Name = Name;
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the ThreadClass class.
+        /// </summary>
+        /// <param name="Start">A ThreadStart delegate that references the methods to be invoked when this thread begins executing</param>
+        public ThreadClass(System.Threading.ThreadStart Start)
+        {
+            threadField = new System.Threading.Thread(Start);
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the ThreadClass class.
+        /// </summary>
+        /// <param name="Start">A ThreadStart delegate that references the methods to be invoked when this thread begins executing</param>
+        /// <param name="Name">The name of the thread</param>
+        public ThreadClass(System.Threading.ThreadStart Start, System.String Name)
+        {
+            threadField = new System.Threading.Thread(Start);
+            this.Name = Name;
+        }
+
+        /// <summary>
+        /// This method has no functionality unless the method is overridden
+        /// </summary>
+        public virtual void Run()
+        {
+        }
+
+        /// <summary>
+        /// Causes the operating system to change the state of the current thread instance to ThreadState.Running
+        /// </summary>
+        public virtual void Start()
+        {
+            threadField.Start();
+        }
+
+        /// <summary>
+        /// Interrupts a thread that is in the WaitSleepJoin thread state
+        /// </summary>
+        public virtual void Interrupt()
+        {
+            threadField.Interrupt();
+        }
+
+        /// <summary>
+        /// Gets the current thread instance
+        /// </summary>
+        public System.Threading.Thread Instance
+        {
+            get
+            {
+                return threadField;
+            }
+            set
+            {
+                threadField = value;
+            }
+        }
+
+        /// <summary>
+        /// Gets or sets the name of the thread
+        /// </summary>
+        public System.String Name
+        {
+            get
+            {
+                return threadField.Name;
+            }
+            set
+            {
+                if (threadField.Name == null)
+                    threadField.Name = value;
+            }
+        }
+
+        public void SetDaemon(bool isDaemon)
+        {
+            threadField.IsBackground = isDaemon;
+        }
+
+        /// <summary>
+        /// Gets or sets a value indicating the scheduling priority of a thread
+        /// </summary>
+        public System.Threading.ThreadPriority Priority
+        {
+            get
+            {
+                try
+                {
+                    return threadField.Priority;
+                }
+                catch
+                {
+                    return System.Threading.ThreadPriority.Normal;
+                }
+            }
+            set
+            {
+                try
+                {
+                    threadField.Priority = value;
+                }
+                catch{}
+                
+            }
+        }
+
+        /// <summary>
+        /// Gets a value indicating the execution status of the current thread
+        /// </summary>
+        public bool IsAlive
+        {
+            get
+            {
+                return threadField.IsAlive;
+            }
+        }
+
+        /// <summary>
+        /// Gets or sets a value indicating whether or not a thread is a background thread.
+        /// </summary>
+        public bool IsBackground
+        {
+            get
+            {
+                return threadField.IsBackground;
+            }
+            set
+            {
+                threadField.IsBackground = value;
+            }
+        }
+
+        /// <summary>
+        /// Blocks the calling thread until a thread terminates
+        /// </summary>
+        public void Join()
+        {
+            threadField.Join();
+        }
+
+        /// <summary>
+        /// Blocks the calling thread until a thread terminates or the specified time elapses
+        /// </summary>
+        /// <param name="milliSeconds">Time of wait in milliseconds</param>
+        public void Join(long milliSeconds)
+        {
+            threadField.Join(new System.TimeSpan(milliSeconds * 10000));
+        }
+
+        /// <summary>
+        /// Blocks the calling thread until a thread terminates or the specified time elapses
+        /// </summary>
+        /// <param name="milliSeconds">Time of wait in milliseconds</param>
+        /// <param name="nanoSeconds">Additional time of wait in nanoseconds</param>
+        public void Join(long milliSeconds, int nanoSeconds)
+        {
+            // one TimeSpan tick is 100 nanoseconds
+            threadField.Join(new System.TimeSpan(milliSeconds * 10000 + nanoSeconds / 100));
+        }
+
+        /// <summary>
+        /// Resumes a thread that has been suspended
+        /// </summary>
+        public void Resume()
+        {
+            System.Threading.Monitor.PulseAll(threadField);
+        }
+
+        /// <summary>
+        /// Raises a ThreadAbortException in the thread on which it is invoked, 
+        /// to begin the process of terminating the thread. Calling this method 
+        /// usually terminates the thread
+        /// </summary>
+        public void Abort()
+        {
+            threadField.Abort();
+        }
+
+        /// <summary>
+        /// Raises a ThreadAbortException in the thread on which it is invoked, 
+        /// to begin the process of terminating the thread while also providing
+        /// exception information about the thread termination. 
+        /// Calling this method usually terminates the thread.
+        /// </summary>
+        /// <param name="stateInfo">An object that contains application-specific information, such as state, which can be used by the thread being aborted</param>
+        public void Abort(object stateInfo)
+        {
+            threadField.Abort(stateInfo);
+        }
+
+        /// <summary>
+        /// Suspends the thread; if the thread is already suspended, this has no effect
+        /// </summary>
+        public void Suspend()
+        {
+            System.Threading.Monitor.Wait(threadField);
+        }
+
+        /// <summary>
+        /// Obtain a String that represents the current object
+        /// </summary>
+        /// <returns>A String that represents the current object</returns>
+        public override System.String ToString()
+        {
+            return "Thread[" + Name + "," + Priority.ToString() + "]";
+        }
+
+        [ThreadStatic]
+        static ThreadClass This = null;
+
+        // named as the Java version
+        public static ThreadClass CurrentThread()
+        {
+            return Current();
+        }
+
+        public static void Sleep(long ms)
+        {
+            // casting long ms to int could lose resolution; however, it is
+            // unlikely that anyone would want to sleep for that long...
+            System.Threading.Thread.Sleep((int)ms);
+        }
+
+        /// <summary>
+        /// Gets the currently running thread
+        /// </summary>
+        /// <returns>The currently running thread</returns>
+        public static ThreadClass Current()
+        {
+            if (This == null)
+            {
+                This = new ThreadClass();
+                This.Instance = System.Threading.Thread.CurrentThread;
+            }
+            return This;
+        }
+
+        public static bool operator ==(ThreadClass t1, object t2)
+        {
+            if (((object)t1) == null) return t2 == null;
+            return t1.Equals(t2);
+        }
+
+        public static bool operator !=(ThreadClass t1, object t2)
+        {
+            return !(t1 == t2);
+        }
+
+        public override bool Equals(object obj)
+        {
+            if (obj == null) return false;
+            if (obj is ThreadClass) return this.threadField.Equals( ((ThreadClass)obj).threadField  );
+            return false;
+        }
+
+        public override int GetHashCode()
+        {
+            return this.threadField.GetHashCode();
+        }
+    }
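+    // Illustrative usage only ("Worker" is a hypothetical method):
+    //
+    //   ThreadClass t = new ThreadClass(new System.Threading.ThreadStart(Worker), "merge-thread");
+    //   t.SetDaemon(true);   // background thread, like Java's setDaemon(true)
+    //   t.Start();
+    //   t.Join();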
+
+    /// <summary>
+    /// Provides methods supporting some operations over files.
+    /// </summary>
+    public class FileSupport
+    {
+        /// <summary>
+        /// Returns an array of abstract pathnames representing the files and directories of the specified path.
+        /// </summary>
+        /// <param name="path">The abstract pathname whose children are to be listed.</param>
+        /// <returns>An array of abstract pathnames for the children of the specified path, or null if the path is not a directory.</returns>
+        public static System.IO.FileInfo[] GetFiles(System.IO.FileInfo path)
+        {
+            if ((path.Attributes & System.IO.FileAttributes.Directory) > 0)
+            {
+                String[] fullpathnames = System.IO.Directory.GetFileSystemEntries(path.FullName);
+                System.IO.FileInfo[] result = new System.IO.FileInfo[fullpathnames.Length];
+                for (int i = 0; i < result.Length ; i++)
+                    result[i] = new System.IO.FileInfo(fullpathnames[i]);
+                return result;
+            }
+            else
+                return null;
+        }
+
+        /// <summary>
+        /// Returns a list of files in a given directory.
+        /// </summary>
+        /// <param name="fullName">The full path name to the directory.</param>
+        /// <param name="indexFileNameFilter">The filter used to select Lucene index files.</param>
+        /// <returns>An array containing the files.</returns>
+        public static System.String[] GetLuceneIndexFiles(System.String fullName, 
+                                                          Mono.Lucene.Net.Index.IndexFileNameFilter indexFileNameFilter)
+        {
+            System.IO.DirectoryInfo dInfo = new System.IO.DirectoryInfo(fullName);
+            System.Collections.ArrayList list = new System.Collections.ArrayList();
+            foreach (System.IO.FileInfo fInfo in dInfo.GetFiles())
+            {
+                if (indexFileNameFilter.Accept(fInfo, fInfo.Name) == true)
+                {
+                    list.Add(fInfo.Name);
+                }
+            }
+            System.String[] retFiles = new System.String[list.Count];
+            list.CopyTo(retFiles);
+            return retFiles;
+        }
+
+        // Disable the obsolete warning since we must use FileStream.Handle
+        // because Mono does not support FileStream.SafeFileHandle at present.
+#pragma warning disable 618
+
+        /// <summary>
+        /// Flushes the specified file stream. Ensures that all buffered
+        /// data is actually written to the file system.
+        /// </summary>
+        /// <param name="fileStream">The file stream.</param>
+        public static void Sync(System.IO.FileStream fileStream)
+        {
+            if (fileStream == null)
+                throw new ArgumentNullException("fileStream");
+
+            fileStream.Flush();
+
+            //if (OS.IsWindows)
+            //{
+            //    if (!FlushFileBuffers(fileStream.Handle))
+            //        throw new System.IO.IOException();
+            //}
+            //else if (OS.IsUnix)
+            //{
+            //    if (fsync(fileStream.Handle) != IntPtr.Zero)
+            //    throw new System.IO.IOException();
+            //}
+            //else
+            //{
+            //    throw new NotImplementedException();
+            //}
+        }
+
+#pragma warning restore 618
+
+        //[System.Runtime.InteropServices.DllImport("libc")]
+        //extern static IntPtr fsync(IntPtr fd);
+
+        //[System.Runtime.InteropServices.DllImport("kernel32.dll")]
+        //extern static bool FlushFileBuffers(IntPtr hFile);
+    }
+
+    /// <summary>
+    /// A simple class for number conversions.
+    /// </summary>
+    public class Number
+    {
+        /// <summary>
+        /// Min radix value.
+        /// </summary>
+        public const int MIN_RADIX = 2;
+        /// <summary>
+        /// Max radix value.
+        /// </summary>
+        public const int MAX_RADIX = 36;
+
+        private const System.String digits = "0123456789abcdefghijklmnopqrstuvwxyz";
+
+
+        /// <summary>
+        /// Converts a number to its base-36 System.String representation.
+        /// </summary>
+        /// <param name="number">The number to convert.</param>
+        /// <returns>The base-36 representation of the number.</returns>
+        public static System.String ToString(long number)
+        {
+            System.Text.StringBuilder s = new System.Text.StringBuilder();
+
+            if (number == 0)
+            {
+                s.Append("0");
+            }
+            else
+            {
+                if (number < 0)
+                {
+                    s.Append("-");
+                    number = -number;
+                }
+
+                while (number > 0)
+                {
+                    char c = digits[(int)(number % 36)];
+                    s.Insert(0, c);
+                    number = number / 36;
+                }
+            }
+
+            return s.ToString();
+        }
+           
+
+        /// <summary>
+        /// Converts a float to a System.String, rendering integral values with a
+        /// trailing ".0" as Java does.
+        /// </summary>
+        /// <param name="f">The float to convert.</param>
+        /// <returns>The string representation of the float.</returns>
+        public static System.String ToString(float f)
+        {
+            if (((float)(int)f) == f)
+            {
+                return ((int)f).ToString() + ".0";
+            }
+            else
+            {
+                return f.ToString(System.Globalization.NumberFormatInfo.InvariantInfo);
+            }
+        }
+
+        /// <summary>
+        /// Converts a number to System.String in the specified radix.
+        /// </summary>
+        /// <param name="i">A number to be converted.</param>
+        /// <param name="radix">A radix.</param>
+        /// <returns>A System.String representation of the number in the specified radix.</returns>
+        public static System.String ToString(long i, int radix)
+        {
+            if (radix < MIN_RADIX || radix > MAX_RADIX)
+                radix = 10;
+
+            char[] buf = new char[65];
+            int charPos = 64;
+            bool negative = (i < 0);
+
+            if (!negative) 
+            {
+                i = -i;
+            }
+
+            while (i <= -radix) 
+            {
+                buf[charPos--] = digits[(int)(-(i % radix))];
+                i = i / radix;
+            }
+            buf[charPos] = digits[(int)(-i)];
+
+            if (negative) 
+            {
+                buf[--charPos] = '-';
+            }
+
+            return new System.String(buf, charPos, (65 - charPos)); 
+        }
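+        // For example, ToString(255, 16) == "ff" and ToString(-255, 2) == "-11111111".
+        // The loop works on the negated magnitude so that long.MinValue, whose
+        // absolute value does not fit in a long, still converts correctly.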
+
+        /// <summary>
+        /// Parses a number in the specified radix.
+        /// </summary>
+        /// <param name="s">An input System.String.</param>
+        /// <param name="radix">A radix.</param>
+        /// <returns>The parsed number in the specified radix.</returns>
+        public static long Parse(System.String s, int radix)
+        {
+            if (s == null) 
+            {
+                throw new ArgumentException("null");
+            }
+
+            if (radix < MIN_RADIX) 
+            {
+                throw new NotSupportedException("radix " + radix +
+                    " less than Number.MIN_RADIX");
+            }
+            if (radix > MAX_RADIX) 
+            {
+                throw new NotSupportedException("radix " + radix +
+                    " greater than Number.MAX_RADIX");
+            }
+
+            long result = 0;
+            long mult = 1;
+
+            s = s.ToLower();
+            
+            for (int i = s.Length - 1; i >= 0; i--)
+            {
+                int weight = digits.IndexOf(s[i]);
+                if (weight == -1)
+                    throw new FormatException("Invalid number for the specified radix");
+
+                result += (weight * mult);
+                mult *= radix;
+            }
+
+            return result;
+        }
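+        // For example, Parse("ff", 16) == 255. Note that, unlike Java's
+        // Long.parseLong, no sign prefix is accepted here; a leading '-' is
+        // rejected as an invalid digit.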
+
+        /// <summary>
+        /// Performs an unsigned bitwise right shift with the specified number
+        /// </summary>
+        /// <param name="number">Number to operate on</param>
+        /// <param name="bits">Amount of bits to shift</param>
+        /// <returns>The resulting number from the shift operation</returns>
+        public static int URShift(int number, int bits)
+        {
+            return (int) (((uint) number) >> bits);
+        }
+
+
+        /// <summary>
+        /// Performs an unsigned bitwise right shift with the specified number
+        /// </summary>
+        /// <param name="number">Number to operate on</param>
+        /// <param name="bits">Amount of bits to shift</param>
+        /// <returns>The resulting number from the shift operation</returns>
+        public static long URShift(long number, int bits)
+        {
+            return (long) (((ulong) number) >> bits);
+        }
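+        // For example, URShift(-1, 28) == 15 (int overload), matching Java's
+        // >>> operator, whereas C#'s signed >> on -1 would yield -1.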
+
+
+        /// <summary>
+        /// Returns the index of the first bit that is set to true that occurs 
+        /// on or after the specified starting index. If no such bit exists 
+        /// then -1 is returned.
+        /// </summary>
+        /// <param name="bits">The BitArray object.</param>
+        /// <param name="fromIndex">The index to start checking from (inclusive).</param>
+        /// <returns>The index of the next set bit.</returns>
+        public static int NextSetBit(System.Collections.BitArray bits, int fromIndex)
+        {
+            for (int i = fromIndex; i < bits.Length; i++)
+            {
+                if (bits[i] == true)
+                {
+                    return i;
+                }
+            }
+            return -1;
+        }
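+        // For example, with only bit 3 set: NextSetBit(bits, 0) == 3 and
+        // NextSetBit(bits, 4) == -1.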
+        
+        /// <summary>
+        /// Converts a base-36 System.String number to long.
+        /// </summary>
+        /// <param name="s">The base-36 number as a string.</param>
+        /// <returns>The parsed value.</returns>
+        public static long ToInt64(System.String s)
+        {
+            long number = 0;
+            long factor;
+
+            // handle negative number
+            if (s.StartsWith("-"))
+            {
+                s = s.Substring(1);
+                factor = -1;
+            }
+            else
+            {
+                factor = 1;
+            }
+
+            // generate number
+            for (int i = s.Length - 1; i > -1; i--)
+            {
+                int n = digits.IndexOf(s[i]);
+
+                // not supporting fractional or scientific notations
+                if (n < 0)
+                    throw new System.ArgumentException("Invalid or unsupported character in number: " + s[i]);
+
+                number += (n * factor);
+                factor *= 36;
+            }
+
+            return number;
+        }
+    }
+
+    /// <summary>
+    /// Mimics Java's Character class.
+    /// </summary>
+    public class Character
+    {
+        private const char charNull= '\0';
+        private const char charZero = '0';
+        private const char charA = 'a';
+
+        /// <summary>
+        /// The maximum radix available for conversion to and from System.Strings.
+        /// </summary>
+        public static int MAX_RADIX
+        {
+            get
+            {
+                return 36;
+            }
+        }
+
+        /// <summary>
+        /// The minimum radix available for conversion to and from System.Strings.
+        /// </summary>
+        public static int MIN_RADIX
+        {
+            get
+            {
+                return 2;
+            }
+        }
+
+        /// <summary>
+        /// Determines the character representation for a specific digit in the specified radix.
+        /// </summary>
+        /// <param name="digit">The digit to convert.</param>
+        /// <param name="radix">The radix.</param>
+        /// <returns>The character for the digit, or the null character when the digit or radix is out of range.</returns>
+        public static char ForDigit(int digit, int radix)
+        {
+            // if radix or digit is out of range,
+            // return the null character.
+            if (radix < Character.MIN_RADIX)
+                return charNull;
+            if (radix > Character.MAX_RADIX)
+                return charNull;
+            if (digit < 0)
+                return charNull;
+            if (digit >= radix)
+                return charNull;
+
+            // if digit is less than 10,
+            // return '0' plus digit
+            if (digit < 10)
+                return (char) ( (int) charZero + digit);
+
+            // otherwise, return 'a' plus digit.
+            return (char) ((int) charA + digit - 10);
+        }
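+        // For example, ForDigit(5, 10) == '5', ForDigit(10, 16) == 'a', and
+        // ForDigit(16, 16) == '\0' because 16 is not a valid digit in radix 16.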
+    }
+
+    /// <summary>
+    /// Supports Java-style parsing of System.Double values.
+    /// </summary>
+    public class Double
+    {
+        public static System.Double Parse(System.String s)
+        {
+            try
+            {
+                return System.Double.Parse(s.Replace(".", System.Globalization.CultureInfo.CurrentCulture.NumberFormat.NumberDecimalSeparator));
+            }
+            catch (OverflowException)
+            {
+                return double.MaxValue;
+            }
+        }
+    }
+
+    /// <summary>
+    /// Supports Java-style parsing and formatting of System.Single values.
+    /// </summary>
+    public class Single
+    {
+        /// <summary>
+        /// Parses a System.Single, accepting a Java-style trailing 'f'/'F' suffix.
+        /// </summary>
+        /// <param name="s">The string to parse.</param>
+        /// <param name="style">The permitted number styles.</param>
+        /// <param name="provider">The format provider.</param>
+        /// <returns>The parsed value.</returns>
+        public static System.Single Parse(System.String s, System.Globalization.NumberStyles style, System.IFormatProvider provider)
+        {
+            try
+            {
+                if (s.EndsWith("f") || s.EndsWith("F"))
+                    return System.Single.Parse(s.Substring(0, s.Length - 1), style, provider);
+                else
+                    return System.Single.Parse(s, style, provider);
+            }
+            catch (System.FormatException)
+            {
+                throw;
+            }
+        }
+
+        /// <summary>
+        /// Parses a System.Single, accepting a Java-style trailing 'f'/'F' suffix.
+        /// </summary>
+        /// <param name="s">The string to parse.</param>
+        /// <param name="provider">The format provider.</param>
+        /// <returns>The parsed value.</returns>
+        public static System.Single Parse(System.String s, System.IFormatProvider provider)
+        {
+            // strip a Java-style 'f'/'F' suffix before parsing
+            if (s.EndsWith("f") || s.EndsWith("F"))
+                return System.Single.Parse(s.Substring(0, s.Length - 1), provider);
+            else
+                return System.Single.Parse(s, provider);
+        }
+
+        /// <summary>
+        /// Parses a float, stripping a trailing 'f'/'F' suffix if present.
+        /// </summary>
+        /// <param name="s">The string to parse.</param>
+        /// <param name="style">The permitted number styles.</param>
+        /// <returns>The parsed value.</returns>
+        public static System.Single Parse(System.String s, System.Globalization.NumberStyles style)
+        {
+            // strip a Java-style 'f'/'F' suffix before parsing
+            if (s.EndsWith("f") || s.EndsWith("F"))
+                return System.Single.Parse(s.Substring(0, s.Length - 1), style);
+            else
+                return System.Single.Parse(s, style);
+        }
+
+        /// <summary>
+        /// Parses a float using '.' as the decimal separator, stripping a
+        /// trailing 'f'/'F' suffix if present.
+        /// </summary>
+        /// <param name="s">The string to parse.</param>
+        /// <returns>The parsed value.</returns>
+        public static System.Single Parse(System.String s)
+        {
+            // strip a Java-style 'f'/'F' suffix, then map '.' to the current
+            // culture's decimal separator so that '.' input always parses
+            if (s.EndsWith("f") || s.EndsWith("F"))
+                return System.Single.Parse(s.Substring(0, s.Length - 1).Replace(".", System.Globalization.CultureInfo.CurrentCulture.NumberFormat.NumberDecimalSeparator));
+            else
+                return System.Single.Parse(s.Replace(".", System.Globalization.CultureInfo.CurrentCulture.NumberFormat.NumberDecimalSeparator));
+        }
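+
+        // Illustrative behavior of the overload above: under a culture whose
+        // decimal separator is ',', both of these succeed and yield 1.5f:
+        //   Single.Parse("1.5")
+        //   Single.Parse("1.5f")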
+
+        public static bool TryParse(System.String s, out float f)
+        {
+            bool ok = false;
+
+            if (s.EndsWith("f") || s.EndsWith("F"))
+                ok = System.Single.TryParse(s.Substring(0, s.Length - 1).Replace(".", System.Globalization.CultureInfo.CurrentCulture.NumberFormat.NumberDecimalSeparator), out f);
+            else
+                ok = System.Single.TryParse(s.Replace(".", System.Globalization.CultureInfo.CurrentCulture.NumberFormat.NumberDecimalSeparator), out f);
+
+            return ok;
+        }
+
+        /// <summary>
+        /// Formats a float using '.' as the decimal separator.
+        /// </summary>
+        /// <param name="f">The value to format.</param>
+        /// <returns>The formatted string.</returns>
+        public static string ToString(float f)
+        {
+            return f.ToString().Replace(System.Globalization.CultureInfo.CurrentCulture.NumberFormat.NumberDecimalSeparator, ".");
+        }
+
+        /// <summary>
+        /// Formats a float with the given format string, using '.' as the
+        /// decimal separator.
+        /// </summary>
+        /// <param name="f">The value to format.</param>
+        /// <param name="format">A standard or custom numeric format string.</param>
+        /// <returns>The formatted string.</returns>
+        public static string ToString(float f, string format)
+        {
+            return f.ToString(format).Replace(System.Globalization.CultureInfo.CurrentCulture.NumberFormat.NumberDecimalSeparator, ".");
+        }
+
+        public static int FloatToIntBits(float value)
+        {
+            return BitConverter.ToInt32(BitConverter.GetBytes(value), 0);
+        }
+
+        public static float IntBitsToFloat(int value)
+        {
+            return BitConverter.ToSingle(BitConverter.GetBytes(value), 0);
+        }
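+
+        // Illustrative round-trip: the two bit conversions are exact inverses
+        // and follow the IEEE 754 single-precision layout, e.g.
+        //   Single.FloatToIntBits(1.0f)       == 0x3F800000
+        //   Single.IntBitsToFloat(0x3F800000) == 1.0f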
+    }
+
+    /// <summary>
+    /// Reads application settings from the configuration file, caching values
+    /// and falling back to supplied defaults when a key is absent.
+    /// </summary>
+    public class AppSettings
+    {
+        static System.Collections.Specialized.ListDictionary settings = new System.Collections.Specialized.ListDictionary();
+
+        /// <summary>
+        /// Stores an int setting in the cache.
+        /// </summary>
+        /// <param name="key">The setting name.</param>
+        /// <param name="defValue">The value to store.</param>
+        public static void Set(System.String key, int defValue)
+        {
+            settings[key] = defValue;
+        }
+
+        /// <summary>
+        /// Stores a long setting in the cache.
+        /// </summary>
+        /// <param name="key">The setting name.</param>
+        /// <param name="defValue">The value to store.</param>
+        public static void Set(System.String key, long defValue)
+        {
+            settings[key] = defValue;
+        }
+
+        /// <summary>
+        /// Stores a string setting in the cache.
+        /// </summary>
+        /// <param name="key">The setting name.</param>
+        /// <param name="defValue">The value to store.</param>
+        public static void Set(System.String key, System.String defValue)
+        {
+            settings[key] = defValue;
+        }
+
+        /// <summary>
+        /// Stores a bool setting in the cache.
+        /// </summary>
+        /// <param name="key">The setting name.</param>
+        /// <param name="defValue">The value to store.</param>
+        public static void Set(System.String key, bool defValue)
+        {
+            settings[key] = defValue;
+        }
+
+        /// <summary>
+        /// Gets an int setting: consults the cache, then the configuration
+        /// file, then falls back to the supplied default.
+        /// </summary>
+        /// <param name="key">The setting name.</param>
+        /// <param name="defValue">The value returned when the key is not configured.</param>
+        /// <returns>The resolved setting value.</returns>
+        public static int Get(System.String key, int defValue)
+        {
+            if (settings[key] != null)
+            {
+                return (int) settings[key];
+            }
+
+            System.String theValue = System.Configuration.ConfigurationManager.AppSettings.Get(key);
+            if (theValue == null)
+            {
+                return defValue;
+            }
+            int retValue = System.Convert.ToInt32(theValue.Trim());
+            settings[key] = retValue;
+            return retValue;
+        }
+
+        /// <summary>
+        /// Gets a long setting: consults the cache, then the configuration
+        /// file, then falls back to the supplied default.
+        /// </summary>
+        /// <param name="key">The setting name.</param>
+        /// <param name="defValue">The value returned when the key is not configured.</param>
+        /// <returns>The resolved setting value.</returns>
+        public static long Get(System.String key, long defValue)
+        {
+            if (settings[key] != null)
+            {
+                return (long) settings[key];
+            }
+
+            System.String theValue = System.Configuration.ConfigurationManager.AppSettings.Get(key);
+            if (theValue == null)
+            {
+                return defValue;
+            }
+            long retValue = System.Convert.ToInt64(theValue.Trim());
+            settings[key] = retValue;
+            return retValue;
+        }
+
+        /// <summary>
+        /// Gets a string setting: consults the cache, then the configuration
+        /// file, then falls back to the supplied default.
+        /// </summary>
+        /// <param name="key">The setting name.</param>
+        /// <param name="defValue">The value returned when the key is not configured.</param>
+        /// <returns>The resolved setting value.</returns>
+        public static System.String Get(System.String key, System.String defValue)
+        {
+            if (settings[key] != null)
+            {
+                return (System.String) settings[key];
+            }
+
+            System.String theValue = System.Configuration.ConfigurationManager.AppSettings.Get(key);
+            if (theValue == null)
+            {
+                return defValue;
+            }
+            settings[key] = theValue;
+            return theValue;
+        }
+
+        public static bool Get(System.String key, bool defValue)
+        {
+            if (settings[key] != null)
+            {
+                return (bool)settings[key];
+            }
+
+            System.String theValue = System.Configuration.ConfigurationManager.AppSettings.Get(key);
+            if (theValue == null)
+            {
+                return defValue;
+            }
+            bool retValue = System.Convert.ToBoolean(theValue.Trim());
+            settings[key] = retValue;
+            return retValue;
+        }
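+
+        // A minimal usage sketch (the key name below is hypothetical):
+        //   int size = AppSettings.Get("Lucene.BufferSize", 1024);
+        // Lookup order: the in-memory cache, then the <appSettings> section of
+        // the application configuration file, then the supplied default; a
+        // value found in the configuration file is cached for later calls.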
+    }
+
+    /// <summary>
+    /// This class provides supporting methods of java.util.BitSet
+    /// that are not present in System.Collections.BitArray.
+    /// </summary>
+    public class BitSetSupport
+    {
+        /// <summary>
+        /// Returns the next set bit at or after index, or -1 if no such bit exists.
+        /// </summary>
+        /// <param name="bitArray"></param>
+        /// <param name="index">the index of bit array at which to start checking</param>
+        /// <returns>the next set bit or -1</returns>
+        public static int NextSetBit(System.Collections.BitArray bitArray, int index)
+        {
+            while (index < bitArray.Length)
+            {
+                // if index bit is set, return it
+                // otherwise check next index bit
+                if (bitArray.Get(index))
+                    return index;
+                else
+                    index++;
+            }
+            // if no bits are set at or after index, return -1
+            return -1;
+        }
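+
+        // Typical iteration over all set bits, mirroring the java.util.BitSet
+        // idiom (illustrative):
+        //   for (int i = BitSetSupport.NextSetBit(bits, 0); i >= 0;
+        //        i = BitSetSupport.NextSetBit(bits, i + 1))
+        //   {
+        //       // bit i is set
+        //   }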
+
+        /// <summary>
+        /// Returns the next clear (unset) bit at or after index, or -1 if no such bit exists.
+        /// </summary>
+        /// <param name="bitArray">The BitArray to scan.</param>
+        /// <param name="index">The index of the bit array at which to start checking.</param>
+        /// <returns>The next clear bit, or -1 if none exists.</returns>
+        public static int NextClearBit(System.Collections.BitArray bitArray, int index)
+        {
+            while (index < bitArray.Length)
+            {
+                // if index bit is not set, return it
+                // otherwise check next index bit
+                if (!bitArray.Get(index))
+                    return index;
+                else
+                    index++;
+            }
+            // if no clear bits exist at or after index, return -1
+            return -1;
+        }
+
+        /// <summary>
+        /// Returns the number of bits set to true in this BitSet.
+        /// </summary>
+        /// <param name="bits">The BitArray object.</param>
+        /// <returns>The number of bits set to true in this BitSet.</returns>
+        public static int Cardinality(System.Collections.BitArray bits)
+        {
+            int count = 0;
+            for (int i = 0; i < bits.Count; i++)
+            {
+                if (bits[i])
+                    count++;
+            }
+            return count;
+        }
+    }
+
+    /// <summary>
+    /// Comparison helpers originally part of the test support class.
+    /// </summary>
+    public class Compare
+    {
+        /// <summary>
+        /// Compares two Term arrays for equality.
+        /// </summary>
+        /// <param name="t1">First Term array to compare</param>
+        /// <param name="t2">Second Term array to compare</param>
+        /// <returns>true if the Terms are equal in both arrays, false otherwise</returns>
+        public static bool CompareTermArrays(Mono.Lucene.Net.Index.Term[] t1, Mono.Lucene.Net.Index.Term[] t2)
+        {
+            if (t1.Length != t2.Length)
+                return false;
+            for (int i = 0; i < t1.Length; i++)
+            {
+                // the arrays differ as soon as any pair of terms differs
+                if (t1[i].CompareTo(t2[i]) != 0)
+                {
+                    return false;
+                }
+            }
+            return true;
+        }
+    }
+    
+    #region WEAKHASHTABLE
+    /// <summary>
+    /// A Hashtable which holds weak references to its keys so they
+    /// can be collected during GC. 
+    /// </summary>
+    [System.Diagnostics.DebuggerDisplay("Count = {Values.Count}")]
+    public class WeakHashTable : Hashtable, IEnumerable
+    {
+        /// <summary>
+        /// A weak reference wrapper for the hashtable keys. Whenever a key/value pair
+        /// is added to the hashtable, the key is wrapped using a WeakKey. WeakKey saves the
+        /// value of the original object's hashcode for fast comparison.
+        /// </summary>
+        class WeakKey 
+        {
+            WeakReference reference;
+            int hashCode;
+
+            public WeakKey(object key)
+            {
+                if (key == null)
+                    throw new ArgumentNullException("key");
+
+                hashCode = key.GetHashCode();
+                reference = new WeakReference(key);
+            }
+
+            public override int GetHashCode()
+            {
+                return hashCode;
+            }
+
+            public object Target
+            {
+                get { return reference.Target; }
+            }
+
+            public bool IsAlive
+            {
+                get { return reference.IsAlive; }
+            }
+        }
+
+        /// <summary>
+        /// A Dictionary enumerator which wraps the original hashtable enumerator 
+        /// and performs 2 tasks: Extract the real key from a WeakKey and skip keys
+        /// that were already collected.
+        /// </summary>
+        class WeakDictionaryEnumerator : IDictionaryEnumerator
+        {
+            IDictionaryEnumerator baseEnumerator;
+            object currentKey;
+            object currentValue;
+
+            public WeakDictionaryEnumerator(IDictionaryEnumerator baseEnumerator)
+            {
+                this.baseEnumerator = baseEnumerator;
+            }
+
+            public DictionaryEntry Entry
+            {
+                get
+                {
+                    return new DictionaryEntry(this.currentKey, this.currentValue);
+                }
+            }
+
+            public object Key
+            {
+                get
+                {
+                    return this.currentKey;
+                }
+            }
+
+            public object Value
+            {
+                get
+                {
+                    return this.currentValue;
+                }
+            }
+
+            public object Current
+            {
+                get
+                {
+                    return Entry;
+                }
+            }
+
+            public bool MoveNext()
+            {
+                while (baseEnumerator.MoveNext())
+                {
+                    object key = ((WeakKey)baseEnumerator.Key).Target;
+                    if (key != null)
+                    {
+                        this.currentKey = key;
+                        this.currentValue = baseEnumerator.Value;
+                        return true;
+                    }
+                }
+                return false;
+            }
+
+            public void Reset()
+            {
+                baseEnumerator.Reset();
+                this.currentKey = null;
+                this.currentValue = null;
+            }
+        }
+
+
+        /// <summary>
+        /// Serves as a simple "GC Monitor" that indicates whether cleanup is needed. 
+        /// If collectableObject.IsAlive is false, a GC has occurred and we should perform cleanup.
+        /// </summary>
+        WeakReference collectableObject = new WeakReference(new Object());
+
+        /// <summary>
+        /// Customize the hashtable lookup process by overriding KeyEquals. KeyEquals
+        /// will compare both WeakKey to WeakKey and WeakKey to real keys
+        /// </summary>
+        protected override bool KeyEquals(object x, object y)
+        {
+            if (x == y)
+                return true;
+
+            if (x is WeakKey)
+            {
+                x = ((WeakKey)x).Target;
+                if (x == null)
+                    return false;
+            }
+
+            if (y is WeakKey)
+            {
+                y = ((WeakKey)y).Target;
+                if (y == null)
+                    return false;
+            }
+
+            return x.Equals(y);
+        }
+
+        protected override int GetHash(object key)
+        {
+            return key.GetHashCode();
+        }
+
+        /// <summary>
+        /// Perform cleanup if GC occurred
+        /// </summary>
+        private void CleanIfNeeded()
+        {
+            if (collectableObject.Target == null)
+            {
+                Clean();
+                collectableObject = new WeakReference(new Object());
+            }
+        }
+
+        /// <summary>
+        /// Iterate over all keys and remove keys that were collected
+        /// </summary>
+        private void Clean()
+        {
+            foreach (WeakKey wtk in ((Hashtable)base.Clone()).Keys)
+            {
+                if (!wtk.IsAlive)
+                {
+                    Remove(wtk);
+                }
+            }
+        }
+
+
+        /// <summary>
+        /// Wrap each key with a WeakKey and add it to the hashtable
+        /// </summary>
+        public override void Add(object key, object value)
+        {
+            CleanIfNeeded();
+            base.Add(new WeakKey(key), value);
+        }
+
+        public override IDictionaryEnumerator GetEnumerator()
+        {
+            Hashtable tmp = null;
+            tmp = (Hashtable)base.Clone();
+            return new WeakDictionaryEnumerator(tmp.GetEnumerator());
+        }
+
+        /// <summary>
+        /// Create a temporary copy of the real keys and return that
+        /// </summary>
+        public override ICollection Keys
+        {
+            get
+            {
+                ArrayList keys = new ArrayList(Count);
+                Hashtable tmpTable = (Hashtable)base.Clone();
+                
+                foreach (WeakKey key in tmpTable.Keys)
+                {
+                    object realKey = key.Target;
+                    if (realKey != null)
+                        keys.Add(realKey);
+                }
+                
+                return keys;
+            }
+        }
+
+        public override object this[object key]
+        {
+            get
+            {
+                return base[key];
+            }
+            set
+            {
+                CleanIfNeeded();
+                base[new WeakKey(key)] = value;
+            }
+        }
+
+        public override void CopyTo(Array array, int index)
+        {
+            int arrayIndex = index;
+            foreach (DictionaryEntry de in this)
+            {
+                array.SetValue(de, arrayIndex++);
+            }
+        }
+
+        public override int Count
+        {
+            get
+            {
+                CleanIfNeeded();
+                return base.Count;
+            }
+        }
+
+        IEnumerator IEnumerable.GetEnumerator()
+        {
+            return GetEnumerator();
+        }
+    }
+    #endregion
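+
+    // A minimal usage sketch for WeakHashTable (illustrative):
+    //   WeakHashTable table = new WeakHashTable();
+    //   object key = new object();
+    //   table.Add(key, "payload");
+    //   // While 'key' is strongly referenced the entry behaves normally;
+    //   // once it is collected, the entry is purged lazily by the next
+    //   // mutating call (Add, the indexer setter, or Count).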
+
+    public class Cryptography
+    {
+        public static bool FIPSCompliant = false;
+
+        public static System.Security.Cryptography.HashAlgorithm GetHashAlgorithm()
+        {
+            if (FIPSCompliant)
+            {
+                // LUCENENET-175: make no assumptions about which FIPS-compliant
+                // hash algorithm is used; it may change over time
+                // (SHA256, SHA384, SHA512, etc.).
+                return System.Security.Cryptography.SHA1.Create();
+            }
+            return System.Security.Cryptography.MD5.Create();
+        }
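+
+        // Illustrative usage; the algorithm is MD5 by default and SHA-1 when
+        // FIPSCompliant is set:
+        //   using (var hash = Cryptography.GetHashAlgorithm())
+        //   {
+        //       byte[] digest = hash.ComputeHash(someBytes);
+        //   }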
+    }
+
+    /// <summary>
+    /// Support class for collection handling: guarded Hashtable additions
+    /// (checking that an added item is unique), bulk adds, membership tests,
+    /// string rendering, sorting, and array filling/comparison.
+    /// </summary>
+    public class CollectionsHelper
+    {
+        public static void Add(System.Collections.Hashtable hashtable, System.Object item)
+        {
+            hashtable.Add(item, item);
+        }
+
+        public static void AddIfNotContains(System.Collections.Hashtable hashtable, System.Object item)
+        {
+            if (hashtable.Contains(item) == false)
+            {
+                hashtable.Add(item, item);
+            }
+        }
+
+        public static void AddIfNotContains(System.Collections.ArrayList hashtable, System.Object item)
+        {
+            if (hashtable.Contains(item) == false)
+            {
+                hashtable.Add(item);
+            }
+        }
+
+        public static void AddAll(System.Collections.Hashtable hashtable, System.Collections.ICollection items)
+        {
+            System.Collections.IEnumerator iter = items.GetEnumerator();
+            System.Object item;
+            while (iter.MoveNext())
+            {
+                item = iter.Current;
+                hashtable.Add(item, item);
+            }
+        }
+
+        public static void AddAllIfNotContains(System.Collections.Hashtable hashtable, System.Collections.IList items)
+        {
+            System.Object item;
+            for (int i = 0; i < items.Count; i++)
+            {
+                item = items[i];
+                if (hashtable.Contains(item) == false)
+                {
+                    hashtable.Add(item, item);
+                }
+            }
+        }
+
+        public static void AddAllIfNotContains(System.Collections.Hashtable hashtable, System.Collections.ICollection items)
+        {
+            System.Collections.IEnumerator iter = items.GetEnumerator();
+            System.Object item;
+            while (iter.MoveNext())
+            {
+                item = iter.Current;
+                if (hashtable.Contains(item) == false)
+                {
+                    hashtable.Add(item, item);
+                }
+            }
+        }
+
+        public static void AddAllIfNotContains(System.Collections.Generic.IDictionary<string,string> hashtable, System.Collections.Generic.ICollection<string> items)
+        {
+            foreach (string s in items)
+            {
+                if (hashtable.ContainsKey(s) == false)
+                {
+                    hashtable.Add(s, s);
+                }
+            }
+        }
+
+        public static void AddAll(System.Collections.Generic.IDictionary<string, string> hashtable, System.Collections.Generic.ICollection<string> items)
+        {
+            foreach (string s in items)
+            {
+                hashtable.Add(s, s);
+            }
+        }
+
+        public static bool Contains(System.Collections.Generic.ICollection<string> col, string item)
+        {
+            foreach (string s in col) if (s == item) return true;
+            return false;
+        }
+
+        public static bool Contains(System.Collections.ICollection col, System.Object item)
+        {
+            System.Collections.IEnumerator iter = col.GetEnumerator();
+            while (iter.MoveNext())
+            {
+                if (iter.Current.Equals(item))
+                    return true;
+            }
+            return false;
+        }
+
+
+        public static System.String CollectionToString(System.Collections.Generic.IDictionary<string, string> c)
+        {
+            Hashtable t = new Hashtable();
+            foreach (string key in c.Keys)
+            {
+                t.Add(key, c[key]);
+            }
+            return CollectionToString(t);
+        }
+
+        /// <summary>
+        /// Converts the specified collection to its string representation.
+        /// </summary>
+        /// <param name="c">The collection to convert to string.</param>
+        /// <returns>A string representation of the specified collection.</returns>
+        public static System.String CollectionToString(System.Collections.ICollection c)
+        {
+            System.Text.StringBuilder s = new System.Text.StringBuilder();
+
+            if (c != null)
+            {
+
+                System.Collections.ArrayList l = new System.Collections.ArrayList(c);
+
+                bool isDictionary = (c is System.Collections.BitArray || c is System.Collections.Hashtable || c is System.Collections.IDictionary || c is System.Collections.Specialized.NameValueCollection || (l.Count > 0 && l[0] is System.Collections.DictionaryEntry));
+                for (int index = 0; index < l.Count; index++)
+                {
+                    if (l[index] == null)
+                        s.Append("null");
+                    else if (!isDictionary)
+                        s.Append(l[index]);
+                    else
+                    {
+                        if (c is System.Collections.Specialized.NameValueCollection)
+                            s.Append(((System.Collections.Specialized.NameValueCollection)c).GetKey(index));
+                        else
+                            s.Append(((System.Collections.DictionaryEntry)l[index]).Key);
+                        s.Append("=");
+                        if (c is System.Collections.Specialized.NameValueCollection)
+                            s.Append(((System.Collections.Specialized.NameValueCollection)c).GetValues(index)[0]);
+                        else
+                            s.Append(((System.Collections.DictionaryEntry)l[index]).Value);
+
+                    }
+                    if (index < l.Count - 1)
+                        s.Append(", ");
+                }
+
+                if (isDictionary)
+                {
+                    if (c is System.Collections.ArrayList)
+                        isDictionary = false;
+                }
+                if (isDictionary)
+                {
+                    s.Insert(0, "{");
+                    s.Append("}");
+                }
+                else
+                {
+                    s.Insert(0, "[");
+                    s.Append("]");
+                }
+            }
+            else
+                s.Insert(0, "null");
+            return s.ToString();
+        }
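+
+        // Illustrative output: a list {1, 2, 3} renders as "[1, 2, 3]", while
+        // a dictionary-like collection with one entry renders as "{key=value}".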
+
+        /// <summary>
+        /// Compares two string arrays for equality.
+        /// </summary>
+        /// <param name="l1">First string array list to compare</param>
+        /// <param name="l2">Second string array list to compare</param>
+        /// <returns>true if the strings are equal in both arrays, false otherwise</returns>
+        public static bool CompareStringArrays(System.String[] l1, System.String[] l2)
+        {
+            if (l1.Length != l2.Length)
+                return false;
+            for (int i = 0; i < l1.Length; i++)
+            {
+                if (l1[i] != l2[i])
+                    return false;
+            }
+            return true;
+        }
+
+        /// <summary>
+        /// Sorts an IList collection.
+        /// </summary>
+        /// <param name="list">The System.Collections.IList instance that will be sorted.</param>
+        /// <param name="Comparator">The comparer to apply, or null to use the natural (default) ordering.</param>
+        public static void Sort(System.Collections.IList list, System.Collections.IComparer Comparator)
+        {
+            if (((System.Collections.ArrayList)list).IsReadOnly)
+                throw new System.NotSupportedException();
+
+            if ((Comparator == null) || (Comparator is System.Collections.Comparer))
+            {
+                try
+                {
+                    ((System.Collections.ArrayList)list).Sort();
+                }
+                catch (System.InvalidOperationException e)
+                {
+                    throw new System.InvalidCastException(e.Message);
+                }
+            }
+            else
+            {
+                try
+                {
+                    ((System.Collections.ArrayList)list).Sort(Comparator);
+                }
+                catch (System.InvalidOperationException e)
+                {
+                    throw new System.InvalidCastException(e.Message);
+                }
+            }
+        }
+
+        /// <summary>
+        /// Fills a range of the array with a specific value.
+        /// </summary>
+        /// <param name="array">The array to be filled.</param>
+        /// <param name="fromindex">The first index to be filled (inclusive).</param>
+        /// <param name="toindex">The index at which filling stops (exclusive).</param>
+        /// <param name="val">The value to fill the array with.</param>
+        public static void Fill(System.Array array, System.Int32 fromindex, System.Int32 toindex, System.Object val)
+        {
+            System.Object Temp_Object = val;
+            System.Type elementtype = array.GetType().GetElementType();
+            if (elementtype != val.GetType())
+                Temp_Object = System.Convert.ChangeType(val, elementtype);
+            if (array.Length == 0)
+                throw (new System.NullReferenceException());
+            if (fromindex > toindex)
+                throw (new System.ArgumentException());
+            if ((fromindex < 0) || array.Length < toindex)
+                throw (new System.IndexOutOfRangeException());
+            for (int index = fromindex; index < toindex; index++)
+                array.SetValue(Temp_Object, index);
+        }
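+
+        // Illustrative: toindex is exclusive, so this sets indices 1..3 only:
+        //   int[] a = new int[5];
+        //   CollectionsHelper.Fill(a, 1, 4, 7);   // a == {0, 7, 7, 7, 0}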
+
+
+        /// <summary>
+        /// Fills the entire array with a specific value.
+        /// </summary>
+        /// <param name="array">The array to be filled.</param>
+        /// <param name="val">The value to fill the array with.</param>
+        public static void Fill(System.Array array, System.Object val)
+        {
+            Fill(array, 0, array.Length, val);
+        }
+
+        /// <summary>
+        /// Compares all members of one array with those of another.
+        /// </summary>
+        /// <param name="array1">The array to be compared.</param>
+        /// <param name="array2">The array to be compared with.</param>
+        /// <returns>Returns true if the two specified arrays of Objects are equal 
+        /// to one another. The two arrays are considered equal if both arrays 
+        /// contain the same number of elements, and all corresponding pairs of 
+        /// elements in the two arrays are equal. Two objects e1 and e2 are 
+        /// considered equal if (e1==null ? e2==null : e1.equals(e2)). In other 
+        /// words, the two arrays are equal if they contain the same elements in 
+        /// the same order. Also, two array references are considered equal if 
+        /// both are null.</returns>
+        public static bool Equals(System.Array array1, System.Array array2)
+        {
+            bool result = false;
+            if ((array1 == null) && (array2 == null))
+                result = true;
+            else if ((array1 != null) && (array2 != null))
+            {
+                if (array1.Length == array2.Length)
+                {
+                    int length = array1.Length;
+                    result = true;
+                    for (int index = 0; index < length; index++)
+                    {
+                        System.Object o1 = array1.GetValue(index);
+                        System.Object o2 = array2.GetValue(index);
+                        if (o1 == null && o2 == null)
+                            continue;   // they match
+                        else if (o1 == null || !o1.Equals(o2))
+                        {
+                            result = false;
+                            break;
+                        }
+                    }
+                }
+            }
+            return result;
+        }
+    }
+
+    /// <summary>A collection of <typeparamref name="TItem"/> which can be
+    /// looked up by instances of <typeparamref name="TKey"/>.</summary>
+    /// <typeparam name="TItem">The type of the items contains in this
+    /// collection.</typeparam>
+    /// <typeparam name="TKey">The type of the keys that can be used to look
+    /// up the items.</typeparam>
+    internal class GeneralKeyedCollection<TKey, TItem> : System.Collections.ObjectModel.KeyedCollection<TKey, TItem>
+    {
+        /// <summary>Creates a new instance of the
+        /// <see cref="GeneralKeyedCollection"/> class.</summary>
+        /// <param name="converter">The <see cref="Converter{TInput, TOutput}"/> which will convert
+        /// instances of <typeparamref name="TItem"/> to <typeparamref name="TKey"/>
+        /// when the override of <see cref="GetKeyForItem(TItem)"/> is called.</param>
+        internal GeneralKeyedCollection(Converter<TItem, TKey> converter) : base()
+        {
+            // If the converter is null, throw an exception.
+            if (converter == null) throw new ArgumentNullException("converter");
+
+            // Store the converter.
+            this.converter = converter;
+        }
+
+        /// <summary>The <see cref="Converter{TInput, TOutput}"/> which will convert
+        /// instances of <typeparamref name="TItem"/> to <typeparamref name="TKey"/>
+        /// when the override of <see cref="GetKeyForItem(TItem)"/> is called.</summary>
+        private readonly Converter<TItem, TKey> converter;
+
+        /// <summary>Converts an item that is added to the collection to
+        /// a key.</summary>
+        /// <param name="item">The instance of <typeparamref name="TItem"/>
+        /// to convert into an instance of <typeparamref name="TKey"/>.</param>
+        /// <returns>The instance of <typeparamref name="TKey"/> which is the
+        /// key for this item.</returns>
+        protected override TKey GetKeyForItem(TItem item)
+        {
+            // The converter is not null.
+            System.Diagnostics.Debug.Assert(converter != null);
+
+            // Call the converter.
+            return converter(item);
+        }
+
+        /// <summary>Determines if a key for an item exists in this
+        /// collection.</summary>
+        /// <param name="key">The instance of <typeparamref name="TKey"/>
+        /// to see if it exists in this collection.</param>
+        /// <returns>True if the key exists in the collection, false otherwise.</returns>
+        public bool ContainsKey(TKey key)
+        {
+            // Call the dictionary - it is lazily created when the first item is added
+            if (Dictionary != null)
+            {
+                return Dictionary.ContainsKey(key);
+            }
+            else
+            {
+                return false;
+            }
+        }
+    }
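+
+    // A minimal sketch of how the collection is used (names are illustrative):
+    //   var byKey = new GeneralKeyedCollection<string, AttributeImplItem>(
+    //       item => item.Key.FullName);
+    //   byKey.Add(someItem);
+    //   bool known = byKey.ContainsKey(typeof(object).FullName);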
+
+    /// <summary>Represents a strongly typed list of objects that can be accessed by index.
+    /// Provides methods to search, sort, and manipulate lists. Also provides functionality
+    /// to compare lists against each other through an implementations of
+    /// <see cref="IEquatable{T}"/>.</summary>
+    /// <typeparam name="T">The type of elements in the list.</typeparam>
+    [Serializable]
+    public class EquatableList<T> : System.Collections.Generic.List<T>,
+        IEquatable<System.Collections.Generic.IEnumerable<T>>,
+        ICloneable
+    {
+        /// <summary>Initializes a new instance of the
+        /// <see cref="EquatableList{T}"/> class that is empty and has the
+        /// default initial capacity.</summary>
+        public EquatableList() : base() { }
+
+        /// <summary>Initializes a new instance of the <see cref="ComparableList{T}"/>
+        /// class that contains elements copied from the specified collection and has
+        /// sufficient capacity to accommodate the number of elements copied.</summary>
+        /// <param name="collection">The collection whose elements are copied to the new list.</param>
+        public EquatableList(System.Collections.Generic.IEnumerable<T> collection) : base(collection) { }
+
+        /// <summary>Initializes a new instance of the <see cref="ComparableList{T}"/> 
+        /// class that is empty and has the specified initial capacity.</summary>
+        /// <param name="capacity">The number of elements that the new list can initially store.</param>
+        public EquatableList(int capacity) : base(capacity) { }
+
+        /// <summary>Adds a range of objects represented by the <see cref="ICollection"/>
+        /// implementation.</summary>
+        /// <param name="c">The <see cref="ICollection"/>
+        /// implementation to add to this list.</param>
+        public void AddRange(ICollection c)
+        {
+            // If the collection is null, throw an exception.
+            if (c == null) throw new ArgumentNullException("c");
+
+            // Pre-compute capacity.
+            Capacity = Math.Max(c.Count + Count, Capacity);
+
+            // Cycle through the items and add.
+            foreach (T item in c)
+            {
+                // Add the item.
+                Add(item);
+            }
+        }
+
+        /// <summary>Compares the counts of two <see cref="IEnumerable{T}"/>
+        /// implementations.</summary>
+        /// <remarks>This uses a trick from LINQ: sniffing types for implementations
+        /// of interfaces that might supply shortcuts when trying to make comparisons.
+        /// In this case, that is the <see cref="ICollection{T}"/> and
+        /// <see cref="ICollection"/> interfaces, either of which can provide a count
+        /// which can be used in determining the equality of sequences (if they don't have
+        /// the same count, then they can't be equal).</remarks>
+        /// <param name="x">The <see cref="IEnumerable{T}"/> from the left hand side of the
+        /// comparison to check the count of.</param>
+        /// <param name="y">The <see cref="IEnumerable{T}"/> from the right hand side of the
+        /// comparison to check the count of.</param>
+        /// <returns>Null if the result is indeterminate.  This occurs when either <paramref name="x"/>
+        /// or <paramref name="y"/> doesn't implement <see cref="ICollection"/> or <see cref="ICollection{T}"/>.
+        /// Otherwise, it will get the count from each and return true if they are equal, false otherwise.</returns>
+        private static bool? EnumerableCountsEqual(System.Collections.Generic.IEnumerable<T> x, System.Collections.Generic.IEnumerable<T> y)
+        {
+            // Get the ICollection<T> and ICollection interfaces.
+            System.Collections.Generic.ICollection<T> xOfTCollection = x as System.Collections.Generic.ICollection<T>;
+            System.Collections.Generic.ICollection<T> yOfTCollection = y as System.Collections.Generic.ICollection<T>;
+            ICollection xCollection = x as ICollection;
+            ICollection yCollection = y as ICollection;
+
+            // The count in x and y.
+            int? xCount = xOfTCollection != null ? xOfTCollection.Count : xCollection != null ? xCollection.Count : (int?)null;
+            int? yCount = yOfTCollection != null ? yOfTCollection.Count : yCollection != null ? yCollection.Count : (int?)null;
+
+            // If either are null, return null, the result is indeterminate.
+            if (xCount == null || yCount == null)
+            {
+                // Return null, indeterminate.
+                return null;
+            }
+
+            // Both counts are non-null, compare.
+            return xCount == yCount;
+        }
+
+        /// <summary>Compares the contents of a <see cref="IEnumerable{T}"/>
+        /// implementation to another one to determine equality.</summary>
+        /// <remarks>Thinking of the <see cref="IEnumerable{T}"/> implementation as
+        /// a string with any number of characters, the algorithm walks both
+        /// sequences in step, comparing item by item.  The sequences are unequal
+        /// as soon as a pair of items differs, or when one sequence runs out of
+        /// items before the other does.</remarks>
+        /// <param name="x">The <see cref="IEnumerable{T}"/> implementation
+        /// that is considered the left hand side.</param>
+        /// <param name="y">The <see cref="IEnumerable{T}"/> implementation
+        /// that is considered the right hand side.</param>
+        /// <returns>True if the items are equal, false otherwise.</returns>
+        private static bool Equals(System.Collections.Generic.IEnumerable<T> x,
+            System.Collections.Generic.IEnumerable<T> y)
+        {
+            // If x and y are null, then return true, they are the same.
+            if (x == null && y == null)
+            {
+                // They are the same, return true.
+                return true;
+            }
+
+            // If one is null, then return a value based on whether or not
+            // one is null or not.
+            if (x == null || y == null)
+            {
+                // Return false, one is null, the other is not.
+                return false;
+            }
+
+            // Check to see if the counts on the IEnumerable implementations are equal.
+            // This is a shortcut, if they are not equal, then the lists are not equal.
+            // If the result is indeterminate, then get out.
+            bool? enumerableCountsEqual = EnumerableCountsEqual(x, y);
+
+            // If the enumerable counts have been able to be calculated (indicated by
+            // a non-null value) and it is false, then no need to iterate through the items.
+            if (enumerableCountsEqual != null && !enumerableCountsEqual.Value)
+            {
+                // The sequences are not equal.
+                return false;
+            }
+
+            // The counts of the items in the enumerations are equal, or indeterminate
+            // so a full iteration needs to be made to compare each item.
+            // Get the default comparer for T first.
+            System.Collections.Generic.EqualityComparer<T> defaultComparer =
+                System.Collections.Generic.EqualityComparer<T>.Default;
+
+            // Get the enumerator for y.
+            System.Collections.Generic.IEnumerator<T> otherEnumerator = y.GetEnumerator();
+
+            // Call Dispose on IDisposable if there is an implementation on the
+            // IEnumerator<T> returned by a call to y.GetEnumerator().
+            using (otherEnumerator as IDisposable)
+            {
+                // Cycle through the items in this list.
+                foreach (T item in x)
+                {
+                    // If there isn't an item to get, then this has more
+                    // items than that, they are not equal.
+                    if (!otherEnumerator.MoveNext())
+                    {
+                        // Return false.
+                        return false;
+                    }
+
+                    // Compare the current items from both sequences.
+                    bool comparison = defaultComparer.Equals(item, otherEnumerator.Current);
+
+                    // If the value is false, return false.
+                    if (!comparison)
+                    {
+                        // Return the value.
+                        return comparison;
+                    }
+                }
+
+                // If there are no more items, then return true, the sequences
+                // are equal.
+                if (!otherEnumerator.MoveNext())
+                {
+                    // The sequences are equal.
+                    return true;
+                }
+
+                // The other sequence has more items than this one, return
+                // false, these are not equal.
+                return false;
+            }
+        }
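+
+        // Illustrative consequence: two lists with the same elements in the
+        // same order compare equal and hash equal, so either can stand in for
+        // the other as a dictionary key:
+        //   var a = new EquatableList<int> { 1, 2, 3 };
+        //   var b = new EquatableList<int> { 1, 2, 3 };
+        //   bool same = a.Equals(b);   // true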
+
+#region IEquatable<IEnumerable<T>> Members
+        /// <summary>Compares this sequence to another <see cref="IEnumerable{T}"/>
+        /// implementation, returning true if they are equal, false otherwise.</summary>
+        /// <param name="other">The other <see cref="IEnumerable{T}"/> implementation
+        /// to compare against.</param>
+        /// <returns>True if the sequence in <paramref name="other"/> 
+        /// is the same as this one.</returns>
+        public bool Equals(System.Collections.Generic.IEnumerable<T> other)
+        {
+            // Delegate to the static element-by-element comparison.
+            return Equals(this, other);
+        }
+#endregion
+
+        /// <summary>Compares this object for equality against other.</summary>
+        /// <param name="obj">The other object to compare this object against.</param>
+        /// <returns>True if this object and <paramref name="obj"/> are equal, false
+        /// otherwise.</returns>
+        public override bool Equals(object obj)
+        {
+            // Call the strongly typed version.
+            return Equals(obj as System.Collections.Generic.IEnumerable<T>);
+        }
+
+        /// <summary>Gets the hash code for the list.</summary>
+        /// <returns>The hash code value.</returns>
+        public override int GetHashCode()
+        {
+            // Call the static method, passing this.
+            return GetHashCode(this);
+        }
+
+        #if __MonoCS__
+        public static int GetHashCode<T>(System.Collections.Generic.IEnumerable<T> source)
+        #else
+        /// <summary>Gets the hash code for the list.</summary>
+        /// <param name="source">The <see cref="IEnumerable{T}"/>
+        /// <param name="source">The <see cref="IEnumerable<T>"/>
+        /// implementation which will have all the contents hashed.</param>
+        /// <returns>The hash code value.</returns>
+        public static int GetHashCode(System.Collections.Generic.IEnumerable<T> source)
+        #endif
+        {
+            // If source is null, then return 0.
+            if (source == null) return 0;
+
+            // Seed the hash code with the hash code of the type.
+            // This is done so that you don't have a lot of collisions of empty
+            // EquatableList instances when placed in dictionaries
+            // and things that rely on hashcodes.
+            int hashCode = typeof(T).GetHashCode();
+
+            // Iterate through the items in this implementation.
+            foreach (T item in source)
+            {
+                // Adjust the hash code.
+                hashCode = 31 * hashCode + (item == null ? 0 : item.GetHashCode());
+            }
+
+            // Return the hash code.
+            return hashCode;
+        }
+
+        // TODO: When diverging from Java version of Lucene, can uncomment these to adhere to best practices when overriding the Equals method and implementing IEquatable<T>.
+        ///// <summary>Overload of the == operator, it compares a
+        ///// <see cref="ComparableList{T}"/> to an <see cref="IEnumerable{T}"/>
+        ///// implementation.</summary>
+        ///// <param name="x">The <see cref="ComparableList{T}"/> to compare
+        ///// against <paramref name="y"/>.</param>
+        ///// <param name="y">The <see cref="IEnumerable{T}"/> to compare
+        ///// against <paramref name="x"/>.</param>
+        ///// <returns>True if the instances are equal, false otherwise.</returns>
+        //public static bool operator ==(EquatableList<T> x, System.Collections.Generic.IEnumerable<T> y)
+        //{
+        //    // Call Equals.
+        //    return Equals(x, y);
+        //}
+
+        ///// <summary>Overload of the == operator, it compares a
+        ///// <see cref="ComparableList{T}"/> to an <see cref="IEnumerable{T}"/>
+        ///// implementation.</summary>
+        ///// <param name="y">The <see cref="ComparableList{T}"/> to compare
+        ///// against <paramref name="x"/>.</param>
+        ///// <param name="x">The <see cref="IEnumerable{T}"/> to compare
+        ///// against <paramref name="y"/>.</param>
+        ///// <returns>True if the instances are equal, false otherwise.</returns>
+        //public static bool operator ==(System.Collections.Generic.IEnumerable<T> x, EquatableList<T> y)
+        //{
+        //    // Call equals.
+        //    return Equals(x, y);
+        //}
+
+        ///// <summary>Overload of the != operator, it compares a
+        ///// <see cref="ComparableList{T}"/> to an <see cref="IEnumerable{T}"/>
+        ///// implementation.</summary>
+        ///// <param name="x">The <see cref="ComparableList{T}"/> to compare
+        ///// against <paramref name="y"/>.</param>
+        ///// <param name="y">The <see cref="IEnumerable{T}"/> to compare
+        ///// against <paramref name="x"/>.</param>
+        ///// <returns>True if the instances are not equal, false otherwise.</returns>
+        //public static bool operator !=(EquatableList<T> x, System.Collections.Generic.IEnumerable<T> y)
+        //{
+        //    // Return the negative of the equals operation.
+        //    return !(x == y);
+        //}
+
+        ///// <summary>Overload of the != operator, it compares a
+        ///// <see cref="ComparableList{T}"/> to an <see cref="IEnumerable{T}"/>
+        ///// implementation.</summary>
+        ///// <param name="y">The <see cref="ComparableList{T}"/> to compare
+        ///// against <paramref name="x"/>.</param>
+        ///// <param name="x">The <see cref="IEnumerable{T}"/> to compare
+        ///// against <paramref name="y"/>.</param>
+        ///// <returns>True if the instances are not equal, false otherwise.</returns>
+        //public static bool operator !=(System.Collections.Generic.IEnumerable<T> x, EquatableList<T> y)
+        //{
+        //    // Return the negative of the equals operation.
+        //    return !(x == y);
+        //}
+
+        #region ICloneable Members
+
+        /// <summary>Clones the <see cref="EquatableList{T}"/>.</summary>
+        /// <remarks>This is a shallow clone.</remarks>
+        /// <returns>A new shallow clone of this
+        /// <see cref="EquatableList{T}"/>.</returns>
+        public object Clone()
+        {
+            // Just create a new one, passing this to the constructor.
+            return new EquatableList<T>(this);
+        }
+
+        #endregion
+    }
+
+    /// <summary>
+    /// A simple wrapper to allow for the use of the GeneralKeyedCollection.  The
+    /// wrapper is required as there can be several keys for an object depending
+    /// on how many interfaces it implements.
+    /// </summary>
+    internal sealed class AttributeImplItem
+    {
+        internal AttributeImplItem(Type key, Mono.Lucene.Net.Util.AttributeImpl value)
+        {
+            this.Key = key;
+            this.Value = value;
+        }
+        internal Type Key;
+        internal Mono.Lucene.Net.Util.AttributeImpl Value;
+    }
+
+    /// <summary>
+    /// Provides platform information.
+    /// </summary>
+    public class OS
+    {
+        static bool isUnix;
+        static bool isWindows;
+
+        static OS()
+        {
+            PlatformID pid = Environment.OSVersion.Platform;
+            isWindows = pid == PlatformID.Win32NT || pid == PlatformID.Win32Windows;
+
+            // we use integers instead of enum tags because PlatformID.MacOSX (6)
+            // requires .NET 2.0 SP2, 3.0 SP2 or 3.5 SP1.
+            // 4 is PlatformID.Unix; 128 is mono's old platform tag for Unix.
+            int id = (int)pid;
+            isUnix = id == 4 || id == 6 || id == 128;
+        }
+
+        /// <summary>
+        /// Whether we run under a Unix platform.
+        /// </summary>
+        public static bool IsUnix
+        {
+            get { return isUnix; }
+        }
+
+        /// <summary>
+        /// Whether we run under a supported Windows platform.
+        /// </summary>
+        public static bool IsWindows
+        {
+            get { return isWindows; }
+        }
+    }
+
+    public class SharpZipLib
+    {
+        static System.Reflection.Assembly asm = null;
+
+        static SharpZipLib()
+        {
+            try
+            {
+                asm = System.Reflection.Assembly.Load("ICSharpCode.SharpZipLib");
+            }
+            catch
+            {
+                // SharpZipLib is optional; the factory methods below throw if it is absent.
+            }
+        }
+
+        public static Deflater CreateDeflater()
+        {
+            if (asm == null) throw new System.IO.FileNotFoundException("Cannot load ICSharpCode.SharpZipLib.dll");
+            return new Deflater(asm.CreateInstance("ICSharpCode.SharpZipLib.Zip.Compression.Deflater"));
+        }
+
+        public static Inflater CreateInflater()
+        {
+            if (asm == null) throw new System.IO.FileNotFoundException("Cannot load ICSharpCode.SharpZipLib.dll");
+            return new Inflater(asm.CreateInstance("ICSharpCode.SharpZipLib.Zip.Compression.Inflater"));
+        }
+
+
+        public class Inflater
+        {
+            delegate void SetInputDelegate(byte[] buffer);
+            delegate bool GetIsFinishedDelegate();
+            delegate int InflateDelegate(byte[] buffer);
+
+            SetInputDelegate setInputMethod;
+            GetIsFinishedDelegate getIsFinishedMethod;
+            InflateDelegate inflateMethod;
+
+            internal Inflater(object inflaterInstance)
+            {
+                Type type = inflaterInstance.GetType();
+
+                setInputMethod = (SetInputDelegate)Delegate.CreateDelegate(
+                    typeof(SetInputDelegate),
+                    inflaterInstance,
+                    type.GetMethod("SetInput", new Type[] { typeof(byte[]) }));
+
+                getIsFinishedMethod = (GetIsFinishedDelegate)Delegate.CreateDelegate(
+                    typeof(GetIsFinishedDelegate),
+                    inflaterInstance,
+                    type.GetMethod("get_IsFinished", Type.EmptyTypes));
+
+                inflateMethod = (InflateDelegate)Delegate.CreateDelegate(
+                    typeof(InflateDelegate),
+                    inflaterInstance,
+                    type.GetMethod("Inflate", new Type[] { typeof(byte[]) }));
+            }
+
+            public void SetInput(byte[] buffer)
+            {
+                setInputMethod(buffer);
+            }
+
+            public bool IsFinished
+            {
+                get { return getIsFinishedMethod(); }
+            }
+
+            public int Inflate(byte[] buffer)
+            {
+                return inflateMethod(buffer);
+            }
+        }
+
+
+        public class Deflater 
+        {
+            delegate void SetLevelDelegate(int level);
+            delegate void SetInputDelegate(byte[] input, int offset, int count);
+            delegate void FinishDelegate();
+            delegate bool GetIsFinishedDelegate();
+            delegate int DeflateDelegate(byte[] output);
+
+            SetLevelDelegate setLevelMethod;
+            SetInputDelegate setInputMethod;
+            FinishDelegate finishMethod;
+            GetIsFinishedDelegate getIsFinishedMethod;
+            DeflateDelegate deflateMethod;
+
+            public const int BEST_COMPRESSION = 9;
+
+            internal Deflater(object deflaterInstance)
+            {
+                Type type = deflaterInstance.GetType();
+
+                setLevelMethod = (SetLevelDelegate)Delegate.CreateDelegate(
+                    typeof(SetLevelDelegate),
+                    deflaterInstance,
+                    type.GetMethod("SetLevel", new Type[] { typeof(int) }));
+
+                setInputMethod = (SetInputDelegate)Delegate.CreateDelegate(
+                    typeof(SetInputDelegate),
+                    deflaterInstance,
+                    type.GetMethod("SetInput", new Type[] { typeof(byte[]), typeof(int), typeof(int) }));
+
+                finishMethod = (FinishDelegate)Delegate.CreateDelegate(
+                    typeof(FinishDelegate),
+                    deflaterInstance,
+                    type.GetMethod("Finish", Type.EmptyTypes));
+
+                getIsFinishedMethod = (GetIsFinishedDelegate)Delegate.CreateDelegate(
+                    typeof(GetIsFinishedDelegate),
+                    deflaterInstance,
+                    type.GetMethod("get_IsFinished", Type.EmptyTypes));
+
+                deflateMethod = (DeflateDelegate)Delegate.CreateDelegate(
+                    typeof(DeflateDelegate),
+                    deflaterInstance,
+                    type.GetMethod("Deflate", new Type[] { typeof(byte[]) }));
+            }
+            
+            public void SetLevel(int level)
+            {
+                setLevelMethod(level);
+            }
+
+            public void SetInput(byte[] input, int offset, int count)
+            {
+                setInputMethod(input, offset, count);
+            }
+
+            public void Finish()
+            {
+                finishMethod();
+            }
+
+            public bool IsFinished
+            {
+                get { return getIsFinishedMethod(); }
+            }
+
+            public int Deflate(byte[] output)
+            {
+                return deflateMethod(output);
+            }
+        }
+    }
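+
+    // An illustrative round-trip with the reflection-based wrappers; this
+    // assumes ICSharpCode.SharpZipLib.dll is resolvable at runtime:
+    //   var deflater = SharpZipLib.CreateDeflater();
+    //   deflater.SetLevel(SharpZipLib.Deflater.BEST_COMPRESSION);
+    //   deflater.SetInput(input, 0, input.Length);
+    //   deflater.Finish();
+    //   int produced = deflater.Deflate(buffer);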
+
+    /// <summary>
+    /// For debugging purposes.
+    /// </summary>
+    public class CloseableThreadLocalProfiler
+    {
+        public static bool _EnableCloseableThreadLocalProfiler = false;
+        public static System.Collections.Generic.List<WeakReference> Instances = new System.Collections.Generic.List<WeakReference>();
+
+        public static bool EnableCloseableThreadLocalProfiler
+        {
+            get { return _EnableCloseableThreadLocalProfiler; }
+            set
+            {
+                _EnableCloseableThreadLocalProfiler = value;
+                lock (Instances)
+                    Instances.Clear();
+            }
+        }
+    }
+
+    public class BuildType
+    {
+#if DEBUG
+        public static bool Debug = true;
+#else
+        public static bool Debug = false;
+#endif
+    }
+}
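
The wrappers above avoid a compile-time dependency on the zip library by binding the
late-bound instance's methods to typed delegates once, in the constructor; every later
call is then a direct delegate invocation rather than MethodInfo.Invoke. A minimal,
self-contained sketch of that pattern (FakeDeflater is a hypothetical stand-in for the
type the real code resolves at runtime):

    using System;

    class FakeDeflater
    {
        // Hypothetical stand-in for the dynamically resolved deflater type.
        public void SetLevel(int level) { Console.WriteLine("level=" + level); }
    }

    static class DelegateBindingSketch
    {
        delegate void SetLevelDelegate(int level);

        static void Main()
        {
            object instance = new FakeDeflater(); // the real code creates this via reflection
            Type type = instance.GetType();
            SetLevelDelegate setLevel = (SetLevelDelegate) Delegate.CreateDelegate(
                typeof(SetLevelDelegate),
                instance,
                type.GetMethod("SetLevel", new Type[] { typeof(int) }));
            setLevel(9); // reflection cost is paid once at bind time, not per call
        }
    }
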
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/.gitattributes b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/.gitattributes
new file mode 100644 (file)
index 0000000..43af684
--- /dev/null
@@ -0,0 +1,34 @@
+/ArrayUtil.cs -crlf
+/Attribute.cs -crlf
+/AttributeImpl.cs -crlf
+/AttributeSource.cs -crlf
+/AverageGuessMemoryModel.cs -crlf
+/BitUtil.cs -crlf
+/BitVector.cs -crlf
+/Cache -crlf
+/CloseableThreadLocal.cs -crlf
+/Constants.cs -crlf
+/DocIdBitSet.cs -crlf
+/FieldCacheSanityChecker.cs -crlf
+/IndexableBinaryStringTools.cs -crlf
+/MapOfSets.cs -crlf
+/MemoryModel.cs -crlf
+/NumericUtils.cs -crlf
+/OpenBitSet.cs -crlf
+/OpenBitSetDISI.cs -crlf
+/OpenBitSetIterator.cs -crlf
+/Package.html -crlf
+/Parameter.cs -crlf
+/PriorityQueue.cs -crlf
+/RamUsageEstimator.cs -crlf
+/ReaderUtil.cs -crlf
+/ScorerDocQueue.cs -crlf
+/SimpleStringInterner.cs -crlf
+/SmallFloat.cs -crlf
+/SortedVIntList.cs -crlf
+/SorterTemplate.cs -crlf
+/StringHelper.cs -crlf
+/StringInterner.cs -crlf
+/ToStringUtils.cs -crlf
+/UnicodeUtil.cs -crlf
+/Version.cs -crlf
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/ArrayUtil.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/ArrayUtil.cs
new file mode 100644 (file)
index 0000000..d04490d
--- /dev/null
@@ -0,0 +1,280 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> Methods for manipulating arrays.</summary>
+       public sealed class ArrayUtil
+       {
+               /*
+               Begin Apache Harmony code
+               
+               Revision taken on Friday, June 12. https://svn.apache.org/repos/asf/harmony/enhanced/classlib/archive/java6/modules/luni/src/main/java/java/lang/Integer.java
+               
+               */
+               
+               /// <summary> Parses the string argument as if it were an int value and returns the
+               /// result. Throws NumberFormatException if the string does not represent an
+               /// int quantity.
+               /// 
+               /// </summary>
+               /// <param name="chars">a string representation of an int quantity.
+               /// </param>
+               /// <returns> int the value represented by the argument
+               /// </returns>
+               /// <throws>  NumberFormatException if the argument could not be parsed as an int quantity. </throws>
+               public static int ParseInt(char[] chars)
+               {
+                       return ParseInt(chars, 0, chars.Length, 10);
+               }
+               
+               /// <summary> Parses a char array into an int.</summary>
+               /// <param name="chars">the character array
+               /// </param>
+               /// <param name="offset">The offset into the array
+               /// </param>
+               /// <param name="len">The length
+               /// </param>
+               /// <returns> the int
+               /// </returns>
+               /// <throws>  NumberFormatException if it can't parse </throws>
+               public static int ParseInt(char[] chars, int offset, int len)
+               {
+                       return ParseInt(chars, offset, len, 10);
+               }
+               
+               /// <summary> Parses the string argument as if it were an int value and returns the
+               /// result. Throws NumberFormatException if the string does not represent an
+               /// int quantity. The second argument specifies the radix to use when parsing
+               /// the value.
+               /// 
+               /// </summary>
+               /// <param name="chars">a string representation of an int quantity.
+               /// </param>
+               /// <param name="offset">the offset into the array.
+               /// </param>
+               /// <param name="len">the number of characters to parse.
+               /// </param>
+               /// <param name="radix">the base to use for conversion.
+               /// </param>
+               /// <returns> int the value represented by the argument
+               /// </returns>
+               /// <throws>  NumberFormatException if the argument could not be parsed as an int quantity. </throws>
+               public static int ParseInt(char[] chars, int offset, int len, int radix)
+               {
+                       if (chars == null || radix < 2 || radix > 36)
+                       {
+                               throw new System.FormatException();
+                       }
+                       int i = 0;
+                       if (len == 0)
+                       {
+                               throw new System.FormatException("chars length is 0");
+                       }
+                       bool negative = chars[offset + i] == '-';
+                       if (negative && ++i == len)
+                       {
+                               throw new System.FormatException("can't convert to an int");
+                       }
+                       if (negative)
+                       {
+                               offset++;
+                               len--;
+                       }
+                       return Parse(chars, offset, len, radix, negative);
+               }
+               
+               
+               private static int Parse(char[] chars, int offset, int len, int radix, bool negative)
+               {
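+                       // The accumulation below runs in the negative range: Int32.MinValue has
+                       // no positive counterpart, so building the value as a negative number
+                       // avoids overflow; the sign is flipped at the end for non-negative input.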
+                       int max = System.Int32.MinValue / radix;
+                       int result = 0;
+                       for (int i = 0; i < len; i++)
+                       {
+                               int digit = (int) System.Char.GetNumericValue(chars[i + offset]);
+                               if (digit == - 1)
+                               {
+                                       throw new System.FormatException("Unable to parse");
+                               }
+                               if (max > result)
+                               {
+                                       throw new System.FormatException("Unable to parse");
+                               }
+                               int next = result * radix - digit;
+                               if (next > result)
+                               {
+                                       throw new System.FormatException("Unable to parse");
+                               }
+                               result = next;
+                       }
+                       /*while (offset < len) {
+                       
+                       }*/
+                       if (!negative)
+                       {
+                               result = - result;
+                               if (result < 0)
+                               {
+                                       throw new System.FormatException("Unable to parse");
+                               }
+                       }
+                       return result;
+               }
+               
+               
+               /*
+               
+               END APACHE HARMONY CODE
+               */
+               
+               
+               public static int GetNextSize(int targetSize)
+               {
+                       /* This over-allocates proportional to the list size, making room
+                       * for additional growth.  The over-allocation is mild, but is
+                       * enough to give linear-time amortized behavior over a long
+                       * sequence of appends() in the presence of a poorly-performing
+                       * system realloc().
+                       * The growth pattern is:  0, 4, 8, 16, 25, 35, 46, 58, 72, 88, ...
+                       */
+                       return (targetSize >> 3) + (targetSize < 9 ? 3 : 6) + targetSize;
+               }
+               
+               public static int GetShrinkSize(int currentSize, int targetSize)
+               {
+                       int newSize = GetNextSize(targetSize);
+                       // Only reallocate if we are "substantially" smaller.
+                       // This saves us from "running hot" (constantly making a
+                       // bit bigger then a bit smaller, over and over):
+                       if (newSize < currentSize / 2)
+                               return newSize;
+                       else
+                               return currentSize;
+               }
+               
+               public static int[] Grow(int[] array, int minSize)
+               {
+                       if (array.Length < minSize)
+                       {
+                               int[] newArray = new int[GetNextSize(minSize)];
+                               Array.Copy(array, 0, newArray, 0, array.Length);
+                               return newArray;
+                       }
+                       else
+                               return array;
+               }
+               
+               public static int[] Grow(int[] array)
+               {
+                       return Grow(array, 1 + array.Length);
+               }
+               
+               public static int[] Shrink(int[] array, int targetSize)
+               {
+                       int newSize = GetShrinkSize(array.Length, targetSize);
+                       if (newSize != array.Length)
+                       {
+                               int[] newArray = new int[newSize];
+                               Array.Copy(array, 0, newArray, 0, newSize);
+                               return newArray;
+                       }
+                       else
+                               return array;
+               }
+               
+               public static long[] Grow(long[] array, int minSize)
+               {
+                       if (array.Length < minSize)
+                       {
+                               long[] newArray = new long[GetNextSize(minSize)];
+                               Array.Copy(array, 0, newArray, 0, array.Length);
+                               return newArray;
+                       }
+                       else
+                               return array;
+               }
+               
+               public static long[] Grow(long[] array)
+               {
+                       return Grow(array, 1 + array.Length);
+               }
+               
+               public static long[] Shrink(long[] array, int targetSize)
+               {
+                       int newSize = GetShrinkSize(array.Length, targetSize);
+                       if (newSize != array.Length)
+                       {
+                               long[] newArray = new long[newSize];
+                               Array.Copy(array, 0, newArray, 0, newSize);
+                               return newArray;
+                       }
+                       else
+                               return array;
+               }
+               
+               public static byte[] Grow(byte[] array, int minSize)
+               {
+                       if (array.Length < minSize)
+                       {
+                               byte[] newArray = new byte[GetNextSize(minSize)];
+                               Array.Copy(array, 0, newArray, 0, array.Length);
+                               return newArray;
+                       }
+                       else
+                               return array;
+               }
+               
+               public static byte[] Grow(byte[] array)
+               {
+                       return Grow(array, 1 + array.Length);
+               }
+               
+               public static byte[] Shrink(byte[] array, int targetSize)
+               {
+                       int newSize = GetShrinkSize(array.Length, targetSize);
+                       if (newSize != array.Length)
+                       {
+                               byte[] newArray = new byte[newSize];
+                               Array.Copy(array, 0, newArray, 0, newSize);
+                               return newArray;
+                       }
+                       else
+                               return array;
+               }
+               
+               /// <summary> Returns hash of chars in range start (inclusive) to
+               /// end (exclusive)
+               /// </summary>
+               public static int HashCode(char[] array, int start, int end)
+               {
+                       int code = 0;
+                       for (int i = end - 1; i >= start; i--)
+                               code = code * 31 + array[i];
+                       return code;
+               }
+               
+               /// <summary> Returns hash of chars in range start (inclusive) to
+               /// end (exclusive)
+               /// </summary>
+               public static int HashCode(byte[] array, int start, int end)
+               {
+                       int code = 0;
+                       for (int i = end - 1; i >= start; i--)
+                               code = code * 31 + array[i];
+                       return code;
+               }
+       }
+}
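
A short usage sketch (assuming the ArrayUtil above is referenced) showing the
over-allocation sequence GetNextSize produces and the usual Grow idiom:

    using System;
    using Mono.Lucene.Net.Util;

    static class ArrayUtilSketch
    {
        static void Main()
        {
            // Prints the documented growth pattern: 0 4 8 16 25 35
            int size = 0;
            for (int i = 0; i < 6; i++)
            {
                Console.Write(size + " ");
                size = ArrayUtil.GetNextSize(size + 1);
            }
            Console.WriteLine();

            // Grow only reallocates when the buffer is actually too small.
            int[] buffer = new int[4];
            buffer = ArrayUtil.Grow(buffer, 10); // length becomes GetNextSize(10) == 17
            Console.WriteLine(buffer.Length);
        }
    }
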
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Attribute.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Attribute.cs
new file mode 100644 (file)
index 0000000..d760e0a
--- /dev/null
@@ -0,0 +1,27 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> Base interface for attributes.</summary>
+       public interface Attribute
+       {
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/AttributeImpl.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/AttributeImpl.cs
new file mode 100644 (file)
index 0000000..8b19e51
--- /dev/null
@@ -0,0 +1,131 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> Base class for Attributes that can be added to a 
+       /// {@link Mono.Lucene.Net.Util.AttributeSource}.
+       /// <p/>
+       /// Attributes are used to add data in a dynamic, yet type-safe way to a source
+       /// of usually streamed objects, e.g. a {@link Mono.Lucene.Net.Analysis.TokenStream}.
+       /// </summary>
+       [Serializable]
+       public abstract class AttributeImpl : System.ICloneable, Attribute
+       {
+               /// <summary> Clears the values in this AttributeImpl and resets it to its 
+               /// default value. If this implementation implements more than one Attribute interface
+               /// it clears all.
+               /// </summary>
+               public abstract void  Clear();
+               
+               /// <summary> The default implementation of this method accesses all declared
+               /// fields of this object and prints the values in the following syntax:
+               /// 
+               /// <pre>
+               /// public String toString() {
+               /// return "start=" + startOffset + ",end=" + endOffset;
+               /// }
+               /// </pre>
+               /// 
+               /// This method may be overridden by subclasses.
+               /// </summary>
+               public override System.String ToString()
+               {
+                       System.Text.StringBuilder buffer = new System.Text.StringBuilder();
+                       System.Type clazz = this.GetType();
+                       System.Reflection.FieldInfo[] fields = clazz.GetFields(System.Reflection.BindingFlags.Instance | System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Public | System.Reflection.BindingFlags.DeclaredOnly | System.Reflection.BindingFlags.Static);
+                       try
+                       {
+                               for (int i = 0; i < fields.Length; i++)
+                               {
+                                       System.Reflection.FieldInfo f = fields[i];
+                                       if (f.IsStatic)
+                                               continue;
+                    //f.setAccessible(true);   // {{Aroush-2.9}} java.lang.reflect.AccessibleObject.setAccessible
+                                       System.Object value_Renamed = f.GetValue(this);
+                                       if (buffer.Length > 0)
+                                       {
+                                               buffer.Append(',');
+                                       }
+                                       if (value_Renamed == null)
+                                       {
+                                               buffer.Append(f.Name + "=null");
+                                       }
+                                       else
+                                       {
+                                               buffer.Append(f.Name + "=" + value_Renamed);
+                                       }
+                               }
+                       }
+                       catch (System.UnauthorizedAccessException e)
+                       {
+                               // this should never happen, because we're just accessing fields
+                               // from 'this'
+                               throw new System.SystemException(e.Message, e);
+                       }
+                       
+                       return buffer.ToString();
+               }
+               
+               /// <summary> Subclasses must implement this method and should compute
+               /// a hashCode similar to this:
+               /// <pre>
+               /// public int hashCode() {
+               /// int code = startOffset;
+               /// code = code * 31 + endOffset;
+               /// return code;
+               /// }
+               /// </pre> 
+               /// 
+               /// see also {@link #equals(Object)}
+               /// </summary>
+               abstract public override int GetHashCode();
+               
+               /// <summary> All values used for computation of {@link #hashCode()} 
+               /// should be checked here for equality.
+               /// 
+               /// see also {@link Object#equals(Object)}
+               /// </summary>
+               abstract public  override bool Equals(System.Object other);
+               
+               /// <summary> Copies the values from this Attribute into the passed-in
+               /// target attribute. The target implementation must support all the
+               /// Attributes this implementation supports.
+               /// </summary>
+               public abstract void  CopyTo(AttributeImpl target);
+               
+               /// <summary> Shallow clone. Subclasses must override this if they 
+               /// need to clone any members deeply.
+               /// </summary>
+               public virtual System.Object Clone()
+               {
+                       System.Object clone = null;
+                       try
+                       {
+                               clone = base.MemberwiseClone();
+                       }
+                       catch (System.Exception e)
+                       {
+                               throw new System.SystemException(e.Message, e); // shouldn't happen
+                       }
+                       return clone;
+               }
+       }
+}
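
As a sketch of the contract these abstract members impose, a hypothetical
FlagAttribute interface and implementation could look as follows. The
"interface name + Impl" naming is deliberate: the default factory in
AttributeSource (next file) locates implementations by that convention.

    using System;

    namespace Mono.Lucene.Net.Util
    {
        // Hypothetical example attribute; not part of the ported sources.
        public interface FlagAttribute : Attribute
        {
            int Flags { get; set; }
        }

        [Serializable]
        public class FlagAttributeImpl : AttributeImpl, FlagAttribute
        {
            private int flags;

            public int Flags
            {
                get { return flags; }
                set { flags = value; }
            }

            // Reset every value this attribute carries to its default.
            public override void Clear() { flags = 0; }

            // Hash code and equality must cover all values the attribute uses.
            public override int GetHashCode() { return flags; }

            public override bool Equals(object other)
            {
                return other is FlagAttributeImpl && ((FlagAttributeImpl) other).flags == flags;
            }

            // Copy this instance's state into the target implementation.
            public override void CopyTo(AttributeImpl target)
            {
                ((FlagAttribute) target).Flags = flags;
            }
        }
    }
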
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/AttributeSource.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/AttributeSource.cs
new file mode 100644 (file)
index 0000000..1a1bc03
--- /dev/null
@@ -0,0 +1,540 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using TokenStream = Mono.Lucene.Net.Analysis.TokenStream;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> An AttributeSource contains a list of different {@link AttributeImpl}s,
+       /// and methods to add and get them. There can only be a single instance
+       /// of an attribute in the same AttributeSource instance. This is ensured
+       /// by passing in the actual type of the Attribute (Class&lt;Attribute&gt;) to 
+       /// the {@link #AddAttribute(Class)} method, which then checks if an instance of
+       /// that type is already present. If yes, it returns the instance, otherwise
+       /// it creates a new instance and returns it.
+       /// </summary>
+       public class AttributeSource
+       {
+               /// <summary> An AttributeFactory creates instances of {@link AttributeImpl}s.</summary>
+               public abstract class AttributeFactory
+               {
+                       /// <summary> Returns an {@link AttributeImpl} for the supplied {@link Attribute} interface class.
+                       /// <p/>Signature for Java 1.5: <code>public AttributeImpl createAttributeInstance(Class&lt;? extends Attribute&gt; attClass)</code>
+                       /// </summary>
+                       public abstract AttributeImpl CreateAttributeInstance(System.Type attClass);
+                       
+                       /// <summary> This is the default factory that creates {@link AttributeImpl}s using the
+                       /// class name of the supplied {@link Attribute} interface class by appending <code>Impl</code> to it.
+                       /// </summary>
+                       public static readonly AttributeFactory DEFAULT_ATTRIBUTE_FACTORY = new DefaultAttributeFactory();
+                       
+                       private sealed class DefaultAttributeFactory:AttributeFactory
+                       {
+                private static readonly SupportClass.WeakHashTable attClassImplMap = new SupportClass.WeakHashTable();
+                
+                               internal DefaultAttributeFactory()
+                               {
+                               }
+                               
+                               public override AttributeImpl CreateAttributeInstance(System.Type attClass)
+                               {
+                                       try
+                                       {
+                                               return (AttributeImpl) System.Activator.CreateInstance(GetClassForInterface(attClass));
+                                       }
+                                       catch (System.UnauthorizedAccessException e)
+                                       {
+                                               throw new System.ArgumentException("Could not instantiate implementing class for " + attClass.FullName, e);
+                                       }
+                                       catch (System.Exception e)
+                                       {
+                                               throw new System.ArgumentException("Could not instantiate implementing class for " + attClass.FullName, e);
+                                       }
+                               }
+                               
+                               private static System.Type GetClassForInterface(System.Type attClass)
+                               {
+                                       lock (attClassImplMap)
+                                       {
+                        WeakReference refz = (WeakReference) attClassImplMap[attClass];
+                        System.Type clazz = (refz == null) ? null : ((System.Type) refz.Target);
+                                               if (clazz == null)
+                                               {
+                                                       try
+                                                       {
+                                string name = attClass.FullName + "Impl," + attClass.Assembly.FullName;
+                                                               attClassImplMap.Add(attClass, new WeakReference( clazz = System.Type.GetType(name, true))); //OK
+                                                       }
+                                                       catch (System.Exception e)
+                                                       {
+                                                               throw new System.ArgumentException("Could not find implementing class for " + attClass.FullName, e);
+                                                       }
+                                               }
+                                               return clazz;
+                                       }
+                               }
+                       }
+               }
+               
+               // These two maps must always be in sync!!!
+               // So they are private, final and read-only from the outside (read-only iterators)
+               private SupportClass.GeneralKeyedCollection<Type, SupportClass.AttributeImplItem> attributes;
+               private SupportClass.GeneralKeyedCollection<Type, SupportClass.AttributeImplItem> attributeImpls;
+               
+               private AttributeFactory factory;
+               
+               /// <summary> An AttributeSource using the default attribute factory {@link AttributeSource.AttributeFactory#DEFAULT_ATTRIBUTE_FACTORY}.</summary>
+               public AttributeSource():this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY)
+               {
+               }
+               
+               /// <summary> An AttributeSource that uses the same attributes as the supplied one.</summary>
+               public AttributeSource(AttributeSource input)
+               {
+                       if (input == null)
+                       {
+                               throw new System.ArgumentException("input AttributeSource must not be null");
+                       }
+                       this.attributes = input.attributes;
+                       this.attributeImpls = input.attributeImpls;
+                       this.factory = input.factory;
+               }
+               
+               /// <summary> An AttributeSource using the supplied {@link AttributeFactory} for creating new {@link Attribute} instances.</summary>
+               public AttributeSource(AttributeFactory factory)
+               {
+            this.attributes = new SupportClass.GeneralKeyedCollection<Type, SupportClass.AttributeImplItem>(delegate(SupportClass.AttributeImplItem att) { return att.Key; });
+            this.attributeImpls = new SupportClass.GeneralKeyedCollection<Type, SupportClass.AttributeImplItem>(delegate(SupportClass.AttributeImplItem att) { return att.Key; });
+                       this.factory = factory;
+               }
+               
+               /// <summary> Returns the AttributeFactory in use.</summary>
+               public virtual AttributeFactory GetAttributeFactory()
+               {
+                       return this.factory;
+               }
+               
+               /// <summary>Returns a new iterator that iterates the attribute classes
+               /// in the same order they were added in.
+               /// Signature for Java 1.5: <code>public Iterator&lt;Class&lt;? extends Attribute&gt;&gt; getAttributeClassesIterator()</code>
+               ///
+               /// Note that this return value is different from Java in that it enumerates over the values
+               /// and not the keys
+               /// </summary>
+               public virtual System.Collections.Generic.IEnumerable<Type> GetAttributeClassesIterator()
+               {
+            foreach (SupportClass.AttributeImplItem item in this.attributes)
+            {
+                yield return item.Key;
+            }
+               }
+               
+               /// <summary>Returns a new iterator that iterates all unique Attribute implementations.
+               /// This iterator may contain fewer entries than {@link #getAttributeClassesIterator},
+               /// if one instance implements more than one Attribute interface.
+               /// Signature for Java 1.5: <code>public Iterator&lt;AttributeImpl&gt; getAttributeImplsIterator()</code>
+               /// </summary>
+               public virtual System.Collections.Generic.IEnumerable<AttributeImpl> GetAttributeImplsIterator()
+               {
+                       if (HasAttributes())
+                       {
+                               if (currentState == null)
+                               {
+                                       ComputeCurrentState();
+                               }
+                while (currentState != null)
+                {
+                    AttributeImpl att = currentState.attribute;
+                    currentState = currentState.next;
+                    yield return att;
+                }
+                       }
+               }
+               
+               /// <summary>a cache that stores all interfaces for known implementation classes for performance (slow reflection) </summary>
+               private static readonly SupportClass.WeakHashTable knownImplClasses = new SupportClass.WeakHashTable();
+
+        // {{Aroush-2.9 Port issue, need to mimic java's IdentityHashMap
+        /*
+         * From Java docs:
+         * This class implements the Map interface with a hash table, using 
+         * reference-equality in place of object-equality when comparing keys 
+         * (and values). In other words, in an IdentityHashMap, two keys k1 and k2 
+         * are considered equal if and only if (k1==k2). (In normal Map 
+         * implementations (like HashMap) two keys k1 and k2 are considered 
+         * equal if and only if (k1==null ? k2==null : k1.equals(k2)).) 
+         */
+        // Aroush-2.9}}
+               
+               /// <summary>Adds a custom AttributeImpl instance with one or more Attribute interfaces. </summary>
+               public virtual void  AddAttributeImpl(AttributeImpl att)
+               {
+                       System.Type clazz = att.GetType();
+                       if (attributeImpls.Contains(clazz))
+                               return ;
+                       System.Collections.ArrayList foundInterfaces;
+                       lock (knownImplClasses)
+                       {
+                               foundInterfaces = (System.Collections.ArrayList) knownImplClasses[clazz];
+                               if (foundInterfaces == null)
+                               {
+                    // we have a strong reference to the class instance holding all interfaces in the list (parameter "att"),
+                    // so all WeakReferences are never evicted by GC
+                                       knownImplClasses.Add(clazz, foundInterfaces = new System.Collections.ArrayList());
+                                       // find all interfaces that this attribute instance implements
+                                       // and that extend the Attribute interface
+                                       System.Type actClazz = clazz;
+                                       do 
+                                       {
+                                               System.Type[] interfaces = actClazz.GetInterfaces();
+                                               for (int i = 0; i < interfaces.Length; i++)
+                                               {
+                                                       System.Type curInterface = interfaces[i];
+                                                       if (curInterface != typeof(Attribute) && typeof(Attribute).IsAssignableFrom(curInterface))
+                                                       {
+                                                               foundInterfaces.Add(new WeakReference(curInterface));
+                                                       }
+                                               }
+                                               actClazz = actClazz.BaseType;
+                                       }
+                                       while (actClazz != null);
+                               }
+                       }
+                       
+                       // add all interfaces of this AttributeImpl to the maps
+                       for (System.Collections.IEnumerator it = foundInterfaces.GetEnumerator(); it.MoveNext(); )
+                       {
+                WeakReference curInterfaceRef = (WeakReference)it.Current;
+                               System.Type curInterface = (System.Type) curInterfaceRef.Target;
+                System.Diagnostics.Debug.Assert(curInterface != null,"We have a strong reference on the class holding the interfaces, so they should never get evicted");
+                               // Attribute is a superclass of this interface
+                               if (!attributes.ContainsKey(curInterface))
+                               {
+                                       // invalidate state to force recomputation in captureState()
+                                       this.currentState = null;
+                    attributes.Add(new SupportClass.AttributeImplItem(curInterface, att));
+                    if (!attributeImpls.ContainsKey(clazz))
+                    {
+                        attributeImpls.Add(new SupportClass.AttributeImplItem(clazz, att));
+                    }
+                               }
+                       }
+               }
+               
+               /// <summary> The caller must pass in a Class&lt;? extends Attribute&gt; value.
+               /// This method first checks if an instance of that class is 
+               /// already in this AttributeSource and returns it. Otherwise a
+               /// new instance is created, added to this AttributeSource and returned. 
+               /// Signature for Java 1.5: <code>public &lt;T extends Attribute&gt; T addAttribute(Class&lt;T&gt;)</code>
+               /// </summary>
+               public virtual Attribute AddAttribute(System.Type attClass)
+               {
+                       if (!attributes.ContainsKey(attClass))
+                       {
+                if (!(attClass.IsInterface &&  typeof(Attribute).IsAssignableFrom(attClass))) 
+                {
+                    throw new ArgumentException(
+                        "AddAttribute() only accepts an interface that extends Attribute, but " +
+                        attClass.FullName + " does not fulfil this contract."
+                    );
+                }
+
+                               AttributeImpl attImpl = this.factory.CreateAttributeInstance(attClass);
+                               AddAttributeImpl(attImpl);
+                               return attImpl;
+                       }
+                       else
+                       {
+                               return attributes[attClass].Value;
+                       }
+               }
+               
+               /// <summary>Returns true, iff this AttributeSource has any attributes </summary>
+               public virtual bool HasAttributes()
+               {
+                       return this.attributes.Count != 0;
+               }
+               
+               /// <summary> The caller must pass in a Class&lt;? extends Attribute&gt; value. 
+               /// Returns true, iff this AttributeSource contains the passed-in Attribute.
+               /// Signature for Java 1.5: <code>public boolean hasAttribute(Class&lt;? extends Attribute&gt;)</code>
+               /// </summary>
+               public virtual bool HasAttribute(System.Type attClass)
+               {
+                       return this.attributes.Contains(attClass);
+               }
+               
+               /// <summary> The caller must pass in a Class&lt;? extends Attribute&gt; value. 
+               /// Returns the instance of the passed in Attribute contained in this AttributeSource
+               /// Signature for Java 1.5: <code>public &lt;T extends Attribute&gt; T getAttribute(Class&lt;T&gt;)</code>
+               /// 
+               /// </summary>
+               /// <throws>  IllegalArgumentException if this AttributeSource does not contain the Attribute. </throws>
+               /// <summary> It is recommended to always use {@link #addAttribute} even in consumers
+               /// of TokenStreams, because you cannot know if a specific TokenStream really uses
+               /// a specific Attribute. {@link #addAttribute} will automatically make the attribute
+               /// available. If you want to only use the attribute, if it is available (to optimize
+               /// consuming), use {@link #hasAttribute}.
+               /// </summary>
+               public virtual Attribute GetAttribute(System.Type attClass)
+               {
+            if (!this.attributes.ContainsKey(attClass))
+            {
+                throw new System.ArgumentException("This AttributeSource does not have the attribute '" + attClass.FullName + "'.");
+            }
+            else
+            {
+                return this.attributes[attClass].Value;
+            }
+               }
+               
+               /// <summary> This class holds the state of an AttributeSource.</summary>
+               /// <seealso cref="captureState">
+               /// </seealso>
+               /// <seealso cref="restoreState">
+               /// </seealso>
+               public sealed class State : System.ICloneable
+               {
+                       internal /*private*/ AttributeImpl attribute;
+                       internal /*private*/ State next;
+                       
+                       public System.Object Clone()
+                       {
+                               State clone = new State();
+                               clone.attribute = (AttributeImpl) attribute.Clone();
+                               
+                               if (next != null)
+                               {
+                                       clone.next = (State) next.Clone();
+                               }
+                               
+                               return clone;
+                       }
+               }
+               
+               private State currentState = null;
+               
+               private void  ComputeCurrentState()
+               {
+                       currentState = new State();
+                       State c = currentState;
+            System.Collections.Generic.IEnumerator<SupportClass.AttributeImplItem> it = attributeImpls.GetEnumerator();
+                       if (it.MoveNext())
+                               c.attribute = it.Current.Value;
+                       while (it.MoveNext())
+                       {
+                               c.next = new State();
+                               c = c.next;
+                               c.attribute = it.Current.Value;
+                       }
+               }
+               
+               /// <summary> Resets all Attributes in this AttributeSource by calling
+               /// {@link AttributeImpl#Clear()} on each Attribute implementation.
+               /// </summary>
+               public virtual void  ClearAttributes()
+               {
+                       if (HasAttributes())
+                       {
+                               if (currentState == null)
+                               {
+                                       ComputeCurrentState();
+                               }
+                               for (State state = currentState; state != null; state = state.next)
+                               {
+                                       state.attribute.Clear();
+                               }
+                       }
+               }
+               
+               /// <summary> Captures the state of all Attributes. The return value can be passed to
+               /// {@link #restoreState} to restore the state of this or another AttributeSource.
+               /// </summary>
+               public virtual State CaptureState()
+               {
+                       if (!HasAttributes())
+                       {
+                               return null;
+                       }
+                       
+                       if (currentState == null)
+                       {
+                               ComputeCurrentState();
+                       }
+                       return (State) this.currentState.Clone();
+               }
+               
+               /// <summary> Restores this state by copying the values of all attribute implementations
+               /// that this state contains into the attributes implementations of the targetStream.
+               /// The targetStream must contain a corresponding instance for each argument
+               /// contained in this state (e.g. it is not possible to restore the state of
+               /// an AttributeSource containing a TermAttribute into an AttributeSource using
+               /// a Token instance as implementation).
+               /// 
+               /// Note that this method does not affect attributes of the targetStream
+               /// that are not contained in this state. In other words, if for example
+               /// the targetStream contains an OffsetAttribute, but this state doesn't, then
+               /// the value of the OffsetAttribute remains unchanged. It might be desirable to
+               /// reset its value to the default, in which case the caller should first
+               /// call {@link TokenStream#ClearAttributes()} on the targetStream.   
+               /// </summary>
+               public virtual void  RestoreState(State state)
+               {
+                       if (state == null)
+                               return ;
+                       
+                       do 
+                       {
+                               if (!attributeImpls.ContainsKey(state.attribute.GetType()))
+                               {
+                                       throw new System.ArgumentException("State contains an AttributeImpl that is not in this AttributeSource");
+                               }
+                               state.attribute.CopyTo(attributeImpls[state.attribute.GetType()].Value);
+                               state = state.next;
+                       }
+                       while (state != null);
+               }
+               
+               public override int GetHashCode()
+               {
+                       int code = 0;
+                       if (HasAttributes())
+                       {
+                               if (currentState == null)
+                               {
+                                       ComputeCurrentState();
+                               }
+                               for (State state = currentState; state != null; state = state.next)
+                               {
+                                       code = code * 31 + state.attribute.GetHashCode();
+                               }
+                       }
+                       
+                       return code;
+               }
+               
+               public  override bool Equals(System.Object obj)
+               {
+                       if (obj == this)
+                       {
+                               return true;
+                       }
+                       
+                       if (obj is AttributeSource)
+                       {
+                               AttributeSource other = (AttributeSource) obj;
+                               
+                               if (HasAttributes())
+                               {
+                                       if (!other.HasAttributes())
+                                       {
+                                               return false;
+                                       }
+                                       
+                                       if (this.attributeImpls.Count != other.attributeImpls.Count)
+                                       {
+                                               return false;
+                                       }
+                                       
+                                       // it is only equal if all attribute impls are the same in the same order
+                                       if (this.currentState == null)
+                                       {
+                                               this.ComputeCurrentState();
+                                       }
+                                       State thisState = this.currentState;
+                                       if (other.currentState == null)
+                                       {
+                                               other.ComputeCurrentState();
+                                       }
+                                       State otherState = other.currentState;
+                                       while (thisState != null && otherState != null)
+                                       {
+                                               if (otherState.attribute.GetType() != thisState.attribute.GetType() || !otherState.attribute.Equals(thisState.attribute))
+                                               {
+                                                       return false;
+                                               }
+                                               thisState = thisState.next;
+                                               otherState = otherState.next;
+                                       }
+                                       return true;
+                               }
+                               else
+                               {
+                                       return !other.HasAttributes();
+                               }
+                       }
+                       else
+                               return false;
+               }
+               
+               public override System.String ToString()
+               {
+                       System.Text.StringBuilder sb = new System.Text.StringBuilder();
+                       sb.Append('(');
+                       
+                       if (HasAttributes())
+                       {
+                               if (currentState == null)
+                               {
+                                       ComputeCurrentState();
+                               }
+                               for (State state = currentState; state != null; state = state.next)
+                               {
+                                       if (state != currentState)
+                                               sb.Append(',');
+                                       sb.Append(state.attribute.ToString());
+                               }
+                       }
+                       sb.Append(')');
+                       return sb.ToString();
+               }
+               
+               /// <summary> Performs a clone of all {@link AttributeImpl} instances returned in a new
+               /// AttributeSource instance. This method can be used to e.g. create another TokenStream
+               /// with exactly the same attributes (using {@link #AttributeSource(AttributeSource)})
+               /// </summary>
+               public virtual AttributeSource CloneAttributes()
+               {
+                       AttributeSource clone = new AttributeSource(this.factory);
+                       
+                       // first clone the impls
+                       if (HasAttributes())
+                       {
+                               if (currentState == null)
+                               {
+                                       ComputeCurrentState();
+                               }
+                               for (State state = currentState; state != null; state = state.next)
+                               {
+                                       AttributeImpl impl = (AttributeImpl) state.attribute.Clone();
+                    clone.attributeImpls.Add(new SupportClass.AttributeImplItem(impl.GetType(), impl));
+                               }
+                       }
+                       
+                       // now the interfaces
+            foreach (SupportClass.AttributeImplItem att in this.attributes)
+                       {
+                clone.attributes.Add(new SupportClass.AttributeImplItem(att.Key, clone.attributeImpls[att.Value.GetType()].Value));
+                       }
+                       
+                       return clone;
+               }
+       }
+}
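
A usage sketch of the lifecycle described above, reusing the hypothetical
FlagAttribute/FlagAttributeImpl pair from the previous example (the default
factory resolves the implementation from the interface name):

    using System;
    using Mono.Lucene.Net.Util;

    static class AttributeSourceSketch
    {
        static void Main()
        {
            AttributeSource src = new AttributeSource();

            // The first request creates the instance; later requests return the same one.
            FlagAttribute flags = (FlagAttribute) src.AddAttribute(typeof(FlagAttribute));
            flags.Flags = 42;

            AttributeSource.State saved = src.CaptureState(); // snapshot of all attribute values
            flags.Flags = 0;
            src.RestoreState(saved);                          // copies the saved values back
            Console.WriteLine(flags.Flags);                   // prints 42
        }
    }
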
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/AverageGuessMemoryModel.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/AverageGuessMemoryModel.cs
new file mode 100644 (file)
index 0000000..9eb2dc5
--- /dev/null
@@ -0,0 +1,123 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> An average, best-guess MemoryModel that should work okay on most systems.
+       /// 
+       /// </summary>
+       public class AverageGuessMemoryModel:MemoryModel
+       {
+               public AverageGuessMemoryModel()
+               {
+                       InitBlock();
+               }
+               internal class AnonymousClassIdentityHashMap : System.Collections.Hashtable /*IdentityHashMap*/  // {{Aroush-2.9.0}} Port issue? Will this do the trick to mimic java's IdentityHashMap?
+               {
+                       public AnonymousClassIdentityHashMap(AverageGuessMemoryModel enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(AverageGuessMemoryModel enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                               Add(typeof(bool), 1);
+                               Add(typeof(byte), 1);
+                Add(typeof(sbyte), 1);
+                               Add(typeof(char), 2);
+                               Add(typeof(short), 2);
+                               Add(typeof(int), 4);
+                               Add(typeof(float), 4);
+                               Add(typeof(double), 8);
+                               Add(typeof(long), 8);
+                       }
+                       private AverageGuessMemoryModel enclosingInstance;
+                       public AverageGuessMemoryModel Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+            // {{Aroush-2.9.0}} Port issue? Will this do the trick to mimic java's IdentityHashMap?
+            /*
+             * From Java docs:
+             * This class implements the Map interface with a hash table, using 
+             * reference-equality in place of object-equality when comparing keys 
+             * (and values). In other words, in an IdentityHashMap, two keys k1 and k2 
+             * are considered equal if and only if (k1==k2). (In normal Map 
+             * implementations (like HashMap) two keys k1 and k2 are considered 
+             * equal if and only if (k1==null ? k2==null : k1.equals(k2)).) 
+             */
+            public new bool Equals(Object obj)
+            {
+                return this.GetHashCode() == obj.GetHashCode();
+            }
+            public new static bool Equals(Object objA, Object objB)
+            {
+                return objA.GetHashCode() == objB.GetHashCode();
+            }
+            // {{Aroush-2.9.0}} Port issue, need to mimic java's IdentityHashMap
+               }
+               private void  InitBlock()
+               {
+                       sizes = new AnonymousClassIdentityHashMap(this);
+               }
+               // best guess primitive sizes
+               private System.Collections.IDictionary sizes;
+               
+               /*
+               * (non-Javadoc)
+               * 
+               * @see Mono.Lucene.Net.Util.MemoryModel#getArraySize()
+               */
+               public override int GetArraySize()
+               {
+                       return 16;
+               }
+               
+               /*
+               * (non-Javadoc)
+               * 
+               * @see Mono.Lucene.Net.Util.MemoryModel#getClassSize()
+               */
+               public override int GetClassSize()
+               {
+                       return 8;
+               }
+               
+               /* (non-Javadoc)
+               * @see Mono.Lucene.Net.Util.MemoryModel#getPrimitiveSize(java.lang.Class)
+               */
+               public override int GetPrimitiveSize(System.Type clazz)
+               {
+                       return ((System.Int32) sizes[clazz]);
+               }
+               
+               /* (non-Javadoc)
+               * @see Mono.Lucene.Net.Util.MemoryModel#getReferenceSize()
+               */
+               public override int GetReferenceSize()
+               {
+                       return 4;
+               }
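+               // Illustrative sketch (not spelled out in the source): callers combine these
+               // estimates, e.g. an int[100] costs roughly GetArraySize() + 100 *
+               // GetPrimitiveSize(typeof(int)) = 16 + 400 bytes, plus GetReferenceSize() = 4
+               // bytes for each reference that points at it.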
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/BitUtil.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/BitUtil.cs
new file mode 100644 (file)
index 0000000..d59b6d1
--- /dev/null
@@ -0,0 +1,877 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       // from org.apache.solr.util rev 555343
+       
+       /// <summary>A variety of highly efficient bit twiddling routines.
+       /// 
+       /// </summary>
+       /// <version>  $Id$
+       /// </version>
+       public class BitUtil
+       {
+               
+               /// <summary>Returns the number of bits set in the long </summary>
+               public static int Pop(long x)
+               {
+                       /* Hacker's Delight 32 bit pop function:
+                       * http://www.hackersdelight.org/HDcode/newCode/pop_arrayHS.cc
+                       *
+                       int pop(unsigned x) {
+                       x = x - ((x >> 1) & 0x55555555);
+                       x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+                       x = (x + (x >> 4)) & 0x0F0F0F0F;
+                       x = x + (x >> 8);
+                       x = x + (x >> 16);
+                       return x & 0x0000003F;
+                       }
+                       ***/
+                       
+                       // 64 bit version of the C function above, ported from the Java original
+                       x = x - ((SupportClass.Number.URShift(x, 1)) & 0x5555555555555555L);
+                       x = (x & 0x3333333333333333L) + ((SupportClass.Number.URShift(x, 2)) & 0x3333333333333333L);
+                       x = (x + (SupportClass.Number.URShift(x, 4))) & 0x0F0F0F0F0F0F0F0FL;
+                       x = x + (SupportClass.Number.URShift(x, 8));
+                       x = x + (SupportClass.Number.URShift(x, 16));
+                       x = x + (SupportClass.Number.URShift(x, 32));
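+                       // the count (0..64) now fits in the low 7 bits, hence the 0x7F mask below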
+                       return ((int) x) & 0x7F;
+               }
+               
+               /// <summary> Returns the number of set bits in an array of longs. </summary>
+               public static long Pop_array(long[] A, int wordOffset, int numWords)
+               {
+                       /*
+                       * Robert Harley and David Seal's bit counting algorithm, as documented
+                       * in the revisions of Hacker's Delight
+                       * http://www.hackersdelight.org/revisions.pdf
+                       * http://www.hackersdelight.org/HDcode/newCode/pop_arrayHS.cc
+                       *
+                       * This function was adapted to Java, and extended to use 64 bit words.
+                       * if only we had access to wider registers like SSE from java...
+                       *
+                       * This function can be transformed to compute the popcount of other functions
+                       * on bitsets via something like this:
+                       * sed 's/A\[\([^]]*\)\]/\(A[\1] \& B[\1]\)/g'
+                       *
+                       */
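+                       /* In carry-save terms: each CSA block below is a 64-lane full adder. From
+                       * two input words and the running "ones" word it derives, per bit position,
+                       * a sum bit (back into ones) and a carry bit (into twosA/twosB). ones, twos
+                       * and fours thus hold, bit-sliced, the 1s, 2s and 4s binary digits of the
+                       * per-position running count, so only the 8s-weight carries need a full
+                       * Pop() call once per 8-word block.
+                       */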
+                       int n = wordOffset + numWords;
+                       long tot = 0, tot8 = 0;
+                       long ones = 0, twos = 0, fours = 0;
+                       
+                       int i;
+                       for (i = wordOffset; i <= n - 8; i += 8)
+                       {
+                               /***  C macro from Hacker's Delight
+                               #define CSA(h,l, a,b,c) \
+                               {unsigned u = a ^ b; unsigned v = c; \
+                               h = (a & b) | (u & v); l = u ^ v;}
+                               ***/
+                               
+                               long twosA, twosB, foursA, foursB, eights;
+                               
+                               // CSA(twosA, ones, ones, A[i], A[i+1])
+                               {
+                                       long b = A[i], c = A[i + 1];
+                                       long u = ones ^ b;
+                                       twosA = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               // CSA(twosB, ones, ones, A[i+2], A[i+3])
+                               {
+                                       long b = A[i + 2], c = A[i + 3];
+                                       long u = ones ^ b;
+                                       twosB = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               //CSA(foursA, twos, twos, twosA, twosB)
+                               {
+                                       long u = twos ^ twosA;
+                                       foursA = (twos & twosA) | (u & twosB);
+                                       twos = u ^ twosB;
+                               }
+                               //CSA(twosA, ones, ones, A[i+4], A[i+5])
+                               {
+                                       long b = A[i + 4], c = A[i + 5];
+                                       long u = ones ^ b;
+                                       twosA = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               // CSA(twosB, ones, ones, A[i+6], A[i+7])
+                               {
+                                       long b = A[i + 6], c = A[i + 7];
+                                       long u = ones ^ b;
+                                       twosB = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               //CSA(foursB, twos, twos, twosA, twosB)
+                               {
+                                       long u = twos ^ twosA;
+                                       foursB = (twos & twosA) | (u & twosB);
+                                       twos = u ^ twosB;
+                               }
+                               
+                               //CSA(eights, fours, fours, foursA, foursB)
+                               {
+                                       long u = fours ^ foursA;
+                                       eights = (fours & foursA) | (u & foursB);
+                                       fours = u ^ foursB;
+                               }
+                               tot8 += Pop(eights);
+                       }
+                       
+                       // handle trailing words in a binary-search manner...
+                       // derived from the loop above by setting specific elements to 0.
+                       // the original method in Hacker's Delight used a simple for loop:
+                       //   for (i = i; i < n; i++)      // Add in the last elements
+                       //  tot = tot + pop(A[i]);
+                       
+                       if (i <= n - 4)
+                       {
+                               long twosA, twosB, foursA, eights;
+                               {
+                                       long b = A[i], c = A[i + 1];
+                                       long u = ones ^ b;
+                                       twosA = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               {
+                                       long b = A[i + 2], c = A[i + 3];
+                                       long u = ones ^ b;
+                                       twosB = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               {
+                                       long u = twos ^ twosA;
+                                       foursA = (twos & twosA) | (u & twosB);
+                                       twos = u ^ twosB;
+                               }
+                               eights = fours & foursA;
+                               fours = fours ^ foursA;
+                               
+                               tot8 += Pop(eights);
+                               i += 4;
+                       }
+                       
+                       if (i <= n - 2)
+                       {
+                               long b = A[i], c = A[i + 1];
+                               long u = ones ^ b;
+                               long twosA = (ones & b) | (u & c);
+                               ones = u ^ c;
+                               
+                               long foursA = twos & twosA;
+                               twos = twos ^ twosA;
+                               
+                               long eights = fours & foursA;
+                               fours = fours ^ foursA;
+                               
+                               tot8 += Pop(eights);
+                               i += 2;
+                       }
+                       
+                       if (i < n)
+                       {
+                               tot += Pop(A[i]);
+                       }
+                       
+                       tot += (Pop(fours) << 2) + (Pop(twos) << 1) + Pop(ones) + (tot8 << 3);
+                       
+                       return tot;
+               }
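+               // e.g. Pop_array(bits, 0, bits.Length) yields the cardinality of a whole long[] bit set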
+               
+               /// <summary>Returns the popcount or cardinality of the two sets after an intersection.
+               /// Neither array is modified.
+               /// </summary>
+               public static long Pop_intersect(long[] A, long[] B, int wordOffset, int numWords)
+               {
+                       // generated from pop_array via sed 's/A\[\([^]]*\)\]/\(A[\1] \& B[\1]\)/g'
+                       int n = wordOffset + numWords;
+                       long tot = 0, tot8 = 0;
+                       long ones = 0, twos = 0, fours = 0;
+                       
+                       int i;
+                       for (i = wordOffset; i <= n - 8; i += 8)
+                       {
+                               long twosA, twosB, foursA, foursB, eights;
+                               
+                               // CSA(twosA, ones, ones, (A[i] & B[i]), (A[i+1] & B[i+1]))
+                               {
+                                       long b = (A[i] & B[i]), c = (A[i + 1] & B[i + 1]);
+                                       long u = ones ^ b;
+                                       twosA = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               // CSA(twosB, ones, ones, (A[i+2] & B[i+2]), (A[i+3] & B[i+3]))
+                               {
+                                       long b = (A[i + 2] & B[i + 2]), c = (A[i + 3] & B[i + 3]);
+                                       long u = ones ^ b;
+                                       twosB = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               //CSA(foursA, twos, twos, twosA, twosB)
+                               {
+                                       long u = twos ^ twosA;
+                                       foursA = (twos & twosA) | (u & twosB);
+                                       twos = u ^ twosB;
+                               }
+                               //CSA(twosA, ones, ones, (A[i+4] & B[i+4]), (A[i+5] & B[i+5]))
+                               {
+                                       long b = (A[i + 4] & B[i + 4]), c = (A[i + 5] & B[i + 5]);
+                                       long u = ones ^ b;
+                                       twosA = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               // CSA(twosB, ones, ones, (A[i+6] & B[i+6]), (A[i+7] & B[i+7]))
+                               {
+                                       long b = (A[i + 6] & B[i + 6]), c = (A[i + 7] & B[i + 7]);
+                                       long u = ones ^ b;
+                                       twosB = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               //CSA(foursB, twos, twos, twosA, twosB)
+                               {
+                                       long u = twos ^ twosA;
+                                       foursB = (twos & twosA) | (u & twosB);
+                                       twos = u ^ twosB;
+                               }
+                               
+                               //CSA(eights, fours, fours, foursA, foursB)
+                               {
+                                       long u = fours ^ foursA;
+                                       eights = (fours & foursA) | (u & foursB);
+                                       fours = u ^ foursB;
+                               }
+                               tot8 += Pop(eights);
+                       }
+                       
+                       
+                       if (i <= n - 4)
+                       {
+                               long twosA, twosB, foursA, eights;
+                               {
+                                       long b = (A[i] & B[i]), c = (A[i + 1] & B[i + 1]);
+                                       long u = ones ^ b;
+                                       twosA = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               {
+                                       long b = (A[i + 2] & B[i + 2]), c = (A[i + 3] & B[i + 3]);
+                                       long u = ones ^ b;
+                                       twosB = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               {
+                                       long u = twos ^ twosA;
+                                       foursA = (twos & twosA) | (u & twosB);
+                                       twos = u ^ twosB;
+                               }
+                               eights = fours & foursA;
+                               fours = fours ^ foursA;
+                               
+                               tot8 += Pop(eights);
+                               i += 4;
+                       }
+                       
+                       if (i <= n - 2)
+                       {
+                               long b = (A[i] & B[i]), c = (A[i + 1] & B[i + 1]);
+                               long u = ones ^ b;
+                               long twosA = (ones & b) | (u & c);
+                               ones = u ^ c;
+                               
+                               long foursA = twos & twosA;
+                               twos = twos ^ twosA;
+                               
+                               long eights = fours & foursA;
+                               fours = fours ^ foursA;
+                               
+                               tot8 += Pop(eights);
+                               i += 2;
+                       }
+                       
+                       if (i < n)
+                       {
+                               tot += Pop((A[i] & B[i]));
+                       }
+                       
+                       tot += (Pop(fours) << 2) + (Pop(twos) << 1) + Pop(ones) + (tot8 << 3);
+                       
+                       return tot;
+               }
+               
+               /// <summary>Returns the popcount or cardinality of the union of two sets.
+               /// Neither array is modified.
+               /// </summary>
+               public static long Pop_union(long[] A, long[] B, int wordOffset, int numWords)
+               {
+                       // generated from pop_array via sed 's/A\[\([^]]*\)\]/\(A[\1] \| B[\1]\)/g'
+                       int n = wordOffset + numWords;
+                       long tot = 0, tot8 = 0;
+                       long ones = 0, twos = 0, fours = 0;
+                       
+                       int i;
+                       for (i = wordOffset; i <= n - 8; i += 8)
+                       {
+                               /***  C macro from Hacker's Delight
+                               #define CSA(h,l, a,b,c) \
+                               {unsigned u = a ^ b; unsigned v = c; \
+                               h = (a & b) | (u & v); l = u ^ v;}
+                               ***/
+                               
+                               long twosA, twosB, foursA, foursB, eights;
+                               
+                               // CSA(twosA, ones, ones, (A[i] | B[i]), (A[i+1] | B[i+1]))
+                               {
+                                       long b = (A[i] | B[i]), c = (A[i + 1] | B[i + 1]);
+                                       long u = ones ^ b;
+                                       twosA = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               // CSA(twosB, ones, ones, (A[i+2] | B[i+2]), (A[i+3] | B[i+3]))
+                               {
+                                       long b = (A[i + 2] | B[i + 2]), c = (A[i + 3] | B[i + 3]);
+                                       long u = ones ^ b;
+                                       twosB = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               //CSA(foursA, twos, twos, twosA, twosB)
+                               {
+                                       long u = twos ^ twosA;
+                                       foursA = (twos & twosA) | (u & twosB);
+                                       twos = u ^ twosB;
+                               }
+                               //CSA(twosA, ones, ones, (A[i+4] | B[i+4]), (A[i+5] | B[i+5]))
+                               {
+                                       long b = (A[i + 4] | B[i + 4]), c = (A[i + 5] | B[i + 5]);
+                                       long u = ones ^ b;
+                                       twosA = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               // CSA(twosB, ones, ones, (A[i+6] | B[i+6]), (A[i+7] | B[i+7]))
+                               {
+                                       long b = (A[i + 6] | B[i + 6]), c = (A[i + 7] | B[i + 7]);
+                                       long u = ones ^ b;
+                                       twosB = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               //CSA(foursB, twos, twos, twosA, twosB)
+                               {
+                                       long u = twos ^ twosA;
+                                       foursB = (twos & twosA) | (u & twosB);
+                                       twos = u ^ twosB;
+                               }
+                               
+                               //CSA(eights, fours, fours, foursA, foursB)
+                               {
+                                       long u = fours ^ foursA;
+                                       eights = (fours & foursA) | (u & foursB);
+                                       fours = u ^ foursB;
+                               }
+                               tot8 += Pop(eights);
+                       }
+                       
+                       
+                       if (i <= n - 4)
+                       {
+                               long twosA, twosB, foursA, eights;
+                               {
+                                       long b = (A[i] | B[i]), c = (A[i + 1] | B[i + 1]);
+                                       long u = ones ^ b;
+                                       twosA = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               {
+                                       long b = (A[i + 2] | B[i + 2]), c = (A[i + 3] | B[i + 3]);
+                                       long u = ones ^ b;
+                                       twosB = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               {
+                                       long u = twos ^ twosA;
+                                       foursA = (twos & twosA) | (u & twosB);
+                                       twos = u ^ twosB;
+                               }
+                               eights = fours & foursA;
+                               fours = fours ^ foursA;
+                               
+                               tot8 += Pop(eights);
+                               i += 4;
+                       }
+                       
+                       if (i <= n - 2)
+                       {
+                               long b = (A[i] | B[i]), c = (A[i + 1] | B[i + 1]);
+                               long u = ones ^ b;
+                               long twosA = (ones & b) | (u & c);
+                               ones = u ^ c;
+                               
+                               long foursA = twos & twosA;
+                               twos = twos ^ twosA;
+                               
+                               long eights = fours & foursA;
+                               fours = fours ^ foursA;
+                               
+                               tot8 += Pop(eights);
+                               i += 2;
+                       }
+                       
+                       if (i < n)
+                       {
+                               tot += Pop((A[i] | B[i]));
+                       }
+                       
+                       tot += (Pop(fours) << 2) + (Pop(twos) << 1) + Pop(ones) + (tot8 << 3);
+                       
+                       return tot;
+               }
+               
+               /// <summary>Returns the popcount or cardinality of A &amp; ~B
+               /// Neither array is modified.
+               /// </summary>
+               public static long Pop_andnot(long[] A, long[] B, int wordOffset, int numWords)
+               {
+                       // generated from pop_array via sed 's/A\[\([^]]*\)\]/\(A[\1] \& ~B[\1]\)/g'
+                       int n = wordOffset + numWords;
+                       long tot = 0, tot8 = 0;
+                       long ones = 0, twos = 0, fours = 0;
+                       
+                       int i;
+                       for (i = wordOffset; i <= n - 8; i += 8)
+                       {
+                               /***  C macro from Hacker's Delight
+                               #define CSA(h,l, a,b,c) \
+                               {unsigned u = a ^ b; unsigned v = c; \
+                               h = (a & b) | (u & v); l = u ^ v;}
+                               ***/
+                               
+                               long twosA, twosB, foursA, foursB, eights;
+                               
+                               // CSA(twosA, ones, ones, (A[i] & ~B[i]), (A[i+1] & ~B[i+1]))
+                               {
+                                       long b = (A[i] & ~ B[i]), c = (A[i + 1] & ~ B[i + 1]);
+                                       long u = ones ^ b;
+                                       twosA = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               // CSA(twosB, ones, ones, (A[i+2] & ~B[i+2]), (A[i+3] & ~B[i+3]))
+                               {
+                                       long b = (A[i + 2] & ~ B[i + 2]), c = (A[i + 3] & ~ B[i + 3]);
+                                       long u = ones ^ b;
+                                       twosB = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               //CSA(foursA, twos, twos, twosA, twosB)
+                               {
+                                       long u = twos ^ twosA;
+                                       foursA = (twos & twosA) | (u & twosB);
+                                       twos = u ^ twosB;
+                               }
+                               //CSA(twosA, ones, ones, (A[i+4] & ~B[i+4]), (A[i+5] & ~B[i+5]))
+                               {
+                                       long b = (A[i + 4] & ~ B[i + 4]), c = (A[i + 5] & ~ B[i + 5]);
+                                       long u = ones ^ b;
+                                       twosA = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               // CSA(twosB, ones, ones, (A[i+6] & ~B[i+6]), (A[i+7] & ~B[i+7]))
+                               {
+                                       long b = (A[i + 6] & ~ B[i + 6]), c = (A[i + 7] & ~ B[i + 7]);
+                                       long u = ones ^ b;
+                                       twosB = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               //CSA(foursB, twos, twos, twosA, twosB)
+                               {
+                                       long u = twos ^ twosA;
+                                       foursB = (twos & twosA) | (u & twosB);
+                                       twos = u ^ twosB;
+                               }
+                               
+                               //CSA(eights, fours, fours, foursA, foursB)
+                               {
+                                       long u = fours ^ foursA;
+                                       eights = (fours & foursA) | (u & foursB);
+                                       fours = u ^ foursB;
+                               }
+                               tot8 += Pop(eights);
+                       }
+                       
+                       
+                       if (i <= n - 4)
+                       {
+                               long twosA, twosB, foursA, eights;
+                               {
+                                       long b = (A[i] & ~ B[i]), c = (A[i + 1] & ~ B[i + 1]);
+                                       long u = ones ^ b;
+                                       twosA = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               {
+                                       long b = (A[i + 2] & ~ B[i + 2]), c = (A[i + 3] & ~ B[i + 3]);
+                                       long u = ones ^ b;
+                                       twosB = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               {
+                                       long u = twos ^ twosA;
+                                       foursA = (twos & twosA) | (u & twosB);
+                                       twos = u ^ twosB;
+                               }
+                               eights = fours & foursA;
+                               fours = fours ^ foursA;
+                               
+                               tot8 += Pop(eights);
+                               i += 4;
+                       }
+                       
+                       if (i <= n - 2)
+                       {
+                               long b = (A[i] & ~ B[i]), c = (A[i + 1] & ~ B[i + 1]);
+                               long u = ones ^ b;
+                               long twosA = (ones & b) | (u & c);
+                               ones = u ^ c;
+                               
+                               long foursA = twos & twosA;
+                               twos = twos ^ twosA;
+                               
+                               long eights = fours & foursA;
+                               fours = fours ^ foursA;
+                               
+                               tot8 += Pop(eights);
+                               i += 2;
+                       }
+                       
+                       if (i < n)
+                       {
+                               tot += Pop((A[i] & ~ B[i]));
+                       }
+                       
+                       tot += (Pop(fours) << 2) + (Pop(twos) << 1) + Pop(ones) + (tot8 << 3);
+                       
+                       return tot;
+               }
+               
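+               /// <summary>Returns the popcount or cardinality of the exclusive-or (symmetric difference) of the two sets.
+               /// Neither array is modified.
+               /// </summary>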
+               public static long Pop_xor(long[] A, long[] B, int wordOffset, int numWords)
+               {
+                       int n = wordOffset + numWords;
+                       long tot = 0, tot8 = 0;
+                       long ones = 0, twos = 0, fours = 0;
+                       
+                       int i;
+                       for (i = wordOffset; i <= n - 8; i += 8)
+                       {
+                               /***  C macro from Hacker's Delight
+                               #define CSA(h,l, a,b,c) \
+                               {unsigned u = a ^ b; unsigned v = c; \
+                               h = (a & b) | (u & v); l = u ^ v;}
+                               ***/
+                               
+                               long twosA, twosB, foursA, foursB, eights;
+                               
+                               // CSA(twosA, ones, ones, (A[i] ^ B[i]), (A[i+1] ^ B[i+1]))
+                               {
+                                       long b = (A[i] ^ B[i]), c = (A[i + 1] ^ B[i + 1]);
+                                       long u = ones ^ b;
+                                       twosA = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               // CSA(twosB, ones, ones, (A[i+2] ^ B[i+2]), (A[i+3] ^ B[i+3]))
+                               {
+                                       long b = (A[i + 2] ^ B[i + 2]), c = (A[i + 3] ^ B[i + 3]);
+                                       long u = ones ^ b;
+                                       twosB = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               //CSA(foursA, twos, twos, twosA, twosB)
+                               {
+                                       long u = twos ^ twosA;
+                                       foursA = (twos & twosA) | (u & twosB);
+                                       twos = u ^ twosB;
+                               }
+                               //CSA(twosA, ones, ones, (A[i+4] ^ B[i+4]), (A[i+5] ^ B[i+5]))
+                               {
+                                       long b = (A[i + 4] ^ B[i + 4]), c = (A[i + 5] ^ B[i + 5]);
+                                       long u = ones ^ b;
+                                       twosA = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               // CSA(twosB, ones, ones, (A[i+6] ^ B[i+6]), (A[i+7] ^ B[i+7]))
+                               {
+                                       long b = (A[i + 6] ^ B[i + 6]), c = (A[i + 7] ^ B[i + 7]);
+                                       long u = ones ^ b;
+                                       twosB = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               //CSA(foursB, twos, twos, twosA, twosB)
+                               {
+                                       long u = twos ^ twosA;
+                                       foursB = (twos & twosA) | (u & twosB);
+                                       twos = u ^ twosB;
+                               }
+                               
+                               //CSA(eights, fours, fours, foursA, foursB)
+                               {
+                                       long u = fours ^ foursA;
+                                       eights = (fours & foursA) | (u & foursB);
+                                       fours = u ^ foursB;
+                               }
+                               tot8 += Pop(eights);
+                       }
+                       
+                       
+                       if (i <= n - 4)
+                       {
+                               long twosA, twosB, foursA, eights;
+                               {
+                                       long b = (A[i] ^ B[i]), c = (A[i + 1] ^ B[i + 1]);
+                                       long u = ones ^ b;
+                                       twosA = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               {
+                                       long b = (A[i + 2] ^ B[i + 2]), c = (A[i + 3] ^ B[i + 3]);
+                                       long u = ones ^ b;
+                                       twosB = (ones & b) | (u & c);
+                                       ones = u ^ c;
+                               }
+                               {
+                                       long u = twos ^ twosA;
+                                       foursA = (twos & twosA) | (u & twosB);
+                                       twos = u ^ twosB;
+                               }
+                               eights = fours & foursA;
+                               fours = fours ^ foursA;
+                               
+                               tot8 += Pop(eights);
+                               i += 4;
+                       }
+                       
+                       if (i <= n - 2)
+                       {
+                               long b = (A[i] ^ B[i]), c = (A[i + 1] ^ B[i + 1]);
+                               long u = ones ^ b;
+                               long twosA = (ones & b) | (u & c);
+                               ones = u ^ c;
+                               
+                               long foursA = twos & twosA;
+                               twos = twos ^ twosA;
+                               
+                               long eights = fours & foursA;
+                               fours = fours ^ foursA;
+                               
+                               tot8 += Pop(eights);
+                               i += 2;
+                       }
+                       
+                       if (i < n)
+                       {
+                               tot += Pop((A[i] ^ B[i]));
+                       }
+                       
+                       tot += (Pop(fours) << 2) + (Pop(twos) << 1) + Pop(ones) + (tot8 << 3);
+                       
+                       return tot;
+               }
+               
+               /* python code to generate ntzTable
+               def ntz(val):
+                   if val==0: return 8
+                   i=0
+                   while (val&0x01)==0:
+                       i = i+1
+                       val >>= 1
+                   return i
+               print ','.join([ str(ntz(i)) for i in range(256) ])
+               */
+               /// <summary>table of number of trailing zeros in a byte </summary>
+               public static readonly sbyte[] ntzTable = new sbyte[]{8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0};
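+               // e.g. ntzTable[12] == 2, since 12 == 0b1100 carries two trailing zero bits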
+               
+               
+               /// <summary>Returns number of trailing zeros in a 64 bit long value. </summary>
+               public static int Ntz(long val)
+               {
+                       // A full binary search to determine the low byte was slower than
+                       // a linear search for nextSetBit().  This is most likely because
+                       // the implementation of nextSetBit() shifts bits to the right, increasing
+                       // the probability that the first non-zero byte is in the rhs.
+                       //
+                       // This implementation does a single binary search at the top level only
+                       // so that all other bit shifting can be done on ints instead of longs to
+                       // remain friendly to 32 bit architectures.  In addition, the case of a
+                       // non-zero first byte is checked for first because it is the most common
+                       // in dense bit arrays.
+                       
+                       int lower = (int) val;
+                       int lowByte = lower & 0xff;
+                       if (lowByte != 0)
+                               return ntzTable[lowByte];
+                       
+                       if (lower != 0)
+                       {
+                               lowByte = (SupportClass.Number.URShift(lower, 8)) & 0xff;
+                               if (lowByte != 0)
+                                       return ntzTable[lowByte] + 8;
+                               lowByte = (SupportClass.Number.URShift(lower, 16)) & 0xff;
+                               if (lowByte != 0)
+                                       return ntzTable[lowByte] + 16;
+                               // no need to mask off low byte for the last byte in the 32 bit word
+                               // no need to check for zero on the last byte either.
+                               return ntzTable[SupportClass.Number.URShift(lower, 24)] + 24;
+                       }
+                       else
+                       {
+                               // grab upper 32 bits
+                               int upper = (int) (val >> 32);
+                               lowByte = upper & 0xff;
+                               if (lowByte != 0)
+                                       return ntzTable[lowByte] + 32;
+                               lowByte = (SupportClass.Number.URShift(upper, 8)) & 0xff;
+                               if (lowByte != 0)
+                                       return ntzTable[lowByte] + 40;
+                               lowByte = (SupportClass.Number.URShift(upper, 16)) & 0xff;
+                               if (lowByte != 0)
+                                       return ntzTable[lowByte] + 48;
+                               // no need to mask off low byte for the last byte in the 32 bit word
+                               // no need to check for zero on the last byte either.
+                               return ntzTable[SupportClass.Number.URShift(upper, 24)] + 56;
+                       }
+               }
+               
+               /// <summary>Returns number of trailing zeros in a 32 bit int value. </summary>
+               public static int Ntz(int val)
+               {
+                       // This implementation does a single binary search at the top level only.
+                       // In addition, the case of a non-zero first byte is checked for first
+                       // because it is the most common in dense bit arrays.
+                       
+                       int lowByte = val & 0xff;
+                       if (lowByte != 0)
+                               return ntzTable[lowByte];
+                       lowByte = (SupportClass.Number.URShift(val, 8)) & 0xff;
+                       if (lowByte != 0)
+                               return ntzTable[lowByte] + 8;
+                       lowByte = (SupportClass.Number.URShift(val, 16)) & 0xff;
+                       if (lowByte != 0)
+                               return ntzTable[lowByte] + 16;
+                       // no need to mask off low byte for the last byte.
+                       // no need to check for zero on the last byte either.
+                       return ntzTable[SupportClass.Number.URShift(val, 24)] + 24;
+               }
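+               // sanity example for the overloads above: Ntz(16) == 4, since 16 == 0b10000
+               // and ntzTable[16] supplies the 4 directly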
+               
+               /// <summary>returns 0 based index of first set bit
+               /// (only works for x!=0)
+               /// <br/> This is an alternate implementation of ntz()
+               /// </summary>
+               public static int Ntz2(long x)
+               {
+                       int n = 0;
+                       int y = (int) x;
+                       if (y == 0)
+                       {
+                               n += 32; y = (int) (SupportClass.Number.URShift(x, 32));
+                       } // the only 64 bit shift necessary
+                       if ((y & 0x0000FFFF) == 0)
+                       {
+                               n += 16; y = SupportClass.Number.URShift(y, 16);
+                       }
+                       if ((y & 0x000000FF) == 0)
+                       {
+                               n += 8; y = SupportClass.Number.URShift(y, 8);
+                       }
+                       return (ntzTable[y & 0xff]) + n;
+               }
+               
+               /// <summary>returns 0 based index of first set bit
+               /// <br/> This is an alternate implementation of ntz()
+               /// </summary>
+               public static int Ntz3(long x)
+               {
+                       // another implementation taken from Hacker's Delight, extended to 64 bits
+                       // and converted to Java.
+                       // Many 32 bit ntz algorithms are at http://www.hackersdelight.org/HDcode/ntz.cc
+                       int n = 1;
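+                       // n starts at 1 so that the final "n - (y & 1)" returns the number of bits
+                       // shifted off when the low bit is set, and that number + 1 otherwise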
+                       
+                       // do the first step as a long, all others as ints.
+                       int y = (int) x;
+                       if (y == 0)
+                       {
+                               n += 32; y = (int) (SupportClass.Number.URShift(x, 32));
+                       }
+                       if ((y & 0x0000FFFF) == 0)
+                       {
+                               n += 16; y = SupportClass.Number.URShift(y, 16);
+                       }
+                       if ((y & 0x000000FF) == 0)
+                       {
+                               n += 8; y = SupportClass.Number.URShift(y, 8);
+                       }
+                       if ((y & 0x0000000F) == 0)
+                       {
+                               n += 4; y = SupportClass.Number.URShift(y, 4);
+                       }
+                       if ((y & 0x00000003) == 0)
+                       {
+                               n += 2; y = SupportClass.Number.URShift(y, 2);
+                       }
+                       return n - (y & 1);
+               }
+               
+               
+               /// <summary>returns true if v is a power of two or zero</summary>
+               public static bool IsPowerOfTwo(int v)
+               {
+                       return ((v & (v - 1)) == 0);
+               }
+               
+               /// <summary>returns true if v is a power of two or zero</summary>
+               public static bool IsPowerOfTwo(long v)
+               {
+                       return ((v & (v - 1)) == 0);
+               }
+               
+               /// <summary>returns the next highest power of two, or the current value if it's already a power of two or zero</summary>
+               public static int NextHighestPowerOfTwo(int v)
+               {
+                       v--;
+                       v |= v >> 1;
+                       v |= v >> 2;
+                       v |= v >> 4;
+                       v |= v >> 8;
+                       v |= v >> 16;
+                       v++;
+                       return v;
+               }
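+               // e.g. v = 17: the decrement gives 16 (0b10000), the shifts smear it to 31
+               // (0b11111), and the final increment lands on 32; the 64 bit overload below
+               // adds one more shift to cover the upper half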
+               
+               /// <summary>returns the next highest power of two, or the current value if it's already a power of two or zero</summary>
+               public static long NextHighestPowerOfTwo(long v)
+               {
+                       v--;
+                       v |= v >> 1;
+                       v |= v >> 2;
+                       v |= v >> 4;
+                       v |= v >> 8;
+                       v |= v >> 16;
+                       v |= v >> 32;
+                       v++;
+                       return v;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/BitVector.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/BitVector.cs
new file mode 100644 (file)
index 0000000..0b85d15
--- /dev/null
@@ -0,0 +1,316 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using Directory = Mono.Lucene.Net.Store.Directory;
+using IndexInput = Mono.Lucene.Net.Store.IndexInput;
+using IndexOutput = Mono.Lucene.Net.Store.IndexOutput;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary>Optimized implementation of a vector of bits.  This is more-or-less like
+       /// java.util.BitSet, but also includes the following:
+       /// <ul>
+       /// <li>a count() method, which efficiently computes the number of one bits;</li>
+       /// <li>optimized read from and write to disk;</li>
+       /// <li>inlinable get() method;</li>
+       /// <li>store and load, as bit set or d-gaps, depending on sparseness;</li> 
+       /// </ul>
+       /// </summary>
+       /// <version>  $Id: BitVector.java 765649 2009-04-16 14:29:26Z mikemccand $
+       /// </version>
+       public sealed class BitVector : System.ICloneable
+       {
+               
+               private byte[] bits;
+               private int size;
+               private int count;
+               
+               /// <summary>Constructs a vector capable of holding <code>n</code> bits. </summary>
+               public BitVector(int n)
+               {
+                       size = n;
+                       bits = new byte[(size >> 3) + 1];
+            count = 0;
+               }
+               
+               internal BitVector(byte[] bits, int size)
+               {
+                       this.bits = bits;
+                       this.size = size;
+            count = -1;
+               }
+               
+               public System.Object Clone()
+               {
+                       byte[] copyBits = new byte[bits.Length];
+                       Array.Copy(bits, 0, copyBits, 0, bits.Length);
+            BitVector clone = new BitVector(copyBits, size);
+            clone.count = count;
+            return clone;
+               }
+               
+               /// <summary>Sets the value of <code>bit</code> to one. </summary>
+               public void  Set(int bit)
+               {
+                       if (bit >= size)
+                       {
+                               throw new System.IndexOutOfRangeException("Index out of bounds: " + bit);
+                       }
+                       bits[bit >> 3] |= (byte) (1 << (bit & 7));
+                       count = - 1;
+               }
+               
+               /// <summary>Sets the value of <code>bit</code> to true, and
+               /// returns true if bit was already set 
+               /// </summary>
+               public bool GetAndSet(int bit)
+               {
+                       if (bit >= size)
+                       {
+                               throw new System.IndexOutOfRangeException("Index out of bounds: " + bit);
+                       }
+                       int pos = bit >> 3;
+                       int v = bits[pos];
+                       int flag = 1 << (bit & 7);
+                       if ((flag & v) != 0)
+                               return true;
+                       else
+                       {
+                               bits[pos] = (byte) (v | flag);
+                               if (count != - 1)
+                                       count++;
+                               return false;
+                       }
+               }
+               
+               /// <summary>Sets the value of <code>bit</code> to zero. </summary>
+               public void  Clear(int bit)
+               {
+                       if (bit >= size)
+                       {
+                               throw new System.IndexOutOfRangeException("Index out of bounds: " + bit);
+                       }
+                       bits[bit >> 3] &= (byte) (~ (1 << (bit & 7)));
+                       count = - 1;
+               }
+               
+               /// <summary>Returns <code>true</code> if <code>bit</code> is one and
+               /// <code>false</code> if it is zero. 
+               /// </summary>
+               public bool Get(int bit)
+               {
+                       System.Diagnostics.Debug.Assert(bit >= 0 && bit < size, "bit " + bit + " is out of bounds 0.." +(size - 1));
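+                       // bit >> 3 selects the containing byte (bit / 8); 1 << (bit & 7) masks the
+                       // bit's position within that byte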
+                       return (bits[bit >> 3] & (1 << (bit & 7))) != 0;
+               }
+               
+               /// <summary>Returns the number of bits in this vector.  This is also one greater than
+               /// the number of the largest valid bit number. 
+               /// </summary>
+               public int Size()
+               {
+                       return size;
+               }
+               
+               /// <summary>Returns the total number of one bits in this vector.  This is efficiently
+               /// computed and cached, so that, if the vector is not changed, no
+               /// recomputation is done for repeated calls. 
+               /// </summary>
+               public int Count()
+               {
+                       // if the vector has been modified
+                       if (count == - 1)
+                       {
+                               int c = 0;
+                               int end = bits.Length;
+                               for (int i = 0; i < end; i++)
+                                       c += BYTE_COUNTS[bits[i] & 0xFF]; // sum bits per byte
+                               count = c;
+                       }
+                       return count;
+               }
+
+        /// <summary>
+        /// For testing 
+        /// </summary>
+        public int GetRecomputedCount()
+        {
+            int c = 0;
+            int end = bits.Length;
+            for (int i = 0; i < end; i++)
+                c += BYTE_COUNTS[bits[i] & 0xFF];        // sum bits per byte
+            return c;
+        }
+               
+               private static readonly byte[] BYTE_COUNTS = new byte[]{0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8};
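+               // e.g. BYTE_COUNTS[5] == 2: the byte 0b00000101 holds two set bits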
+               
+               
+               /// <summary>Writes this vector to the file <code>name</code> in Directory
+               /// <code>d</code>, in a format that can be read by the constructor {@link
+               /// #BitVector(Directory, String)}.  
+               /// </summary>
+               public void  Write(Directory d, System.String name)
+               {
+                       IndexOutput output = d.CreateOutput(name);
+                       try
+                       {
+                               if (IsSparse())
+                               {
+                                       WriteDgaps(output); // sparse bit-set more efficiently saved as d-gaps.
+                               }
+                               else
+                               {
+                                       WriteBits(output);
+                               }
+                       }
+                       finally
+                       {
+                               output.Close();
+                       }
+               }
+               
+               /// <summary>Write as a bit set </summary>
+               private void  WriteBits(IndexOutput output)
+               {
+                       output.WriteInt(Size()); // write size
+                       output.WriteInt(Count()); // write count
+                       output.WriteBytes(bits, bits.Length);
+               }
+               
+               /// <summary>Write as a d-gaps list </summary>
+               private void  WriteDgaps(IndexOutput output)
+               {
+                       output.WriteInt(- 1); // mark using d-gaps                         
+                       output.WriteInt(Size()); // write size
+                       output.WriteInt(Count()); // write count
+                       int last = 0;
+                       int n = Count();
+                       int m = bits.Length;
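+                       // e.g. if only bytes 3 and 10 are non-zero, this writes gap 3 then bits[3],
+                       // then gap 7 then bits[10]; ReadDgaps reverses the gaps with a running sum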
+                       for (int i = 0; i < m && n > 0; i++)
+                       {
+                               if (bits[i] != 0)
+                               {
+                                       output.WriteVInt(i - last);
+                                       output.WriteByte(bits[i]);
+                                       last = i;
+                                       n -= BYTE_COUNTS[bits[i] & 0xFF];
+                               }
+                       }
+               }
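+
+               // Worked example of the d-gap layout: if the only non-zero backing
+               // bytes are bits[2] and bits[5], the stream written above is
+               //
+               //   -1, size, count, vint(2 - 0), bits[2], vint(5 - 2), bits[5]
+               //
+               // i.e. one vint gap plus the raw byte per non-zero byte, which is the
+               // cost model IsSparse() below uses to choose between the two formats.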
+               
+               /// <summary>Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. </summary>
+               private bool IsSparse()
+               {
+                       // note: order of comparisons below set to favor smaller values (no binary range search.)
+                       // note: adding 4 because we start with ((int) -1) to indicate d-gaps format.
+                       // note: we write the d-gap for the byte number, and the byte (bits[i]) itself, therefore
+                       //       multiplying count by (8+8) or (8+16) or (8+24) etc.:
+                       //       - first 8 for writing bits[i] (1 byte vs. 1 bit), and 
+                       //       - second part for writing the byte-number d-gap as vint. 
+                       // note: factor is for read/write of byte-arrays being faster than vints.  
+                       int factor = 10;
+                       if (bits.Length < (1 << 7))
+                               return factor * (4 + (8 + 8) * Count()) < Size();
+                       if (bits.Length < (1 << 14))
+                               return factor * (4 + (8 + 16) * Count()) < Size();
+                       if (bits.Length < (1 << 21))
+                               return factor * (4 + (8 + 24) * Count()) < Size();
+                       if (bits.Length < (1 << 28))
+                               return factor * (4 + (8 + 32) * Count()) < Size();
+                       return factor * (4 + (8 + 40) * Count()) < Size();
+               }
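+
+               // Sanity check of the threshold (for illustration): with
+               // bits.Length < 2^7 the estimated cost per set bit is 8 + 8 bits, so a
+               // 1000-bit vector (Size() == 1000) is stored as d-gaps only while
+               // 10 * (4 + 16 * Count()) < 1000, i.e. while Count() <= 5.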
+               
+               /// <summary>Constructs a bit vector from the file <code>name</code> in Directory
+               /// <code>d</code>, as written by the {@link #write} method.
+               /// </summary>
+               public BitVector(Directory d, System.String name)
+               {
+                       IndexInput input = d.OpenInput(name);
+                       try
+                       {
+                               size = input.ReadInt(); // read size
+                               if (size == - 1)
+                               {
+                                       ReadDgaps(input);
+                               }
+                               else
+                               {
+                                       ReadBits(input);
+                               }
+                       }
+                       finally
+                       {
+                               input.Close();
+                       }
+               }
+               
+               /// <summary>Read as a bit set </summary>
+               private void  ReadBits(IndexInput input)
+               {
+                       count = input.ReadInt(); // read count
+                       bits = new byte[(size >> 3) + 1]; // allocate bits
+                       input.ReadBytes(bits, 0, bits.Length);
+               }
+               
+               /// <summary>read as a d-gaps list </summary>
+               private void  ReadDgaps(IndexInput input)
+               {
+                       size = input.ReadInt(); // (re)read size
+                       count = input.ReadInt(); // read count
+                       bits = new byte[(size >> 3) + 1]; // allocate bits
+                       int last = 0;
+                       int n = Count();
+                       while (n > 0)
+                       {
+                               last += input.ReadVInt();
+                               bits[last] = input.ReadByte();
+                               n -= BYTE_COUNTS[bits[last] & 0xFF];
+                       }
+               }
+               
+               /// <summary> Retrieve a subset of this BitVector.
+               /// 
+               /// </summary>
+               /// <param name="start">starting index, inclusive
+               /// </param>
+               /// <param name="end">ending index, exclusive
+               /// </param>
+               /// <returns> subset
+               /// </returns>
+               public BitVector Subset(int start, int end)
+               {
+                       if (start < 0 || end > Size() || end < start)
+                               throw new System.IndexOutOfRangeException();
+                       // Special case -- return empty vector if start == end
+                       if (end == start)
+                               return new BitVector(0);
+                       byte[] bits = new byte[(SupportClass.Number.URShift((end - start - 1), 3)) + 1];
+                       int s = SupportClass.Number.URShift(start, 3);
+                       for (int i = 0; i < bits.Length; i++)
+                       {
+                               int cur = 0xFF & this.bits[i + s];
+                               int next = i + s + 1 >= this.bits.Length?0:0xFF & this.bits[i + s + 1];
+                               bits[i] = (byte) ((SupportClass.Number.URShift(cur, (start & 7))) | ((next << (8 - (start & 7)))));
+                       }
+                       int bitsToClear = (bits.Length * 8 - (end - start)) % 8;
+                       bits[bits.Length - 1] &= (byte) (~ (0xFF << (8 - bitsToClear)));
+                       return new BitVector(bits, end - start);
+               }
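+
+               // Illustrative trace: Subset(3, 9) copies 6 bits. Here s == 0 and the
+               // single output byte is (cur >>> 3) | (next << 5); bitsToClear is
+               // (8 - 6) % 8 == 2, so the final mask ~(0xFF << 6) zeroes the two
+               // unused high bits.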
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Cache/Cache.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Cache/Cache.cs
new file mode 100644 (file)
index 0000000..adcaef0
--- /dev/null
@@ -0,0 +1,118 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util.Cache
+{
+       
+       
+       /// <summary> Base class for cache implementations.</summary>
+       public abstract class Cache
+       {
+               
+               /// <summary> Simple Cache wrapper that synchronizes all
+               /// calls that access the cache. 
+               /// </summary>
+               internal class SynchronizedCache_Renamed_Class:Cache
+               {
+                       internal System.Object mutex;
+                       internal Cache cache;
+                       
+                       internal SynchronizedCache_Renamed_Class(Cache cache)
+                       {
+                               this.cache = cache;
+                               this.mutex = this;
+                       }
+                       
+                       internal SynchronizedCache_Renamed_Class(Cache cache, System.Object mutex)
+                       {
+                               this.cache = cache;
+                               this.mutex = mutex;
+                       }
+                       
+                       public override void  Put(System.Object key, System.Object value_Renamed)
+                       {
+                               lock (mutex)
+                               {
+                                       cache.Put(key, value_Renamed);
+                               }
+                       }
+                       
+                       public override System.Object Get(System.Object key)
+                       {
+                               lock (mutex)
+                               {
+                                       return cache.Get(key);
+                               }
+                       }
+                       
+                       public override bool ContainsKey(System.Object key)
+                       {
+                               lock (mutex)
+                               {
+                                       return cache.ContainsKey(key);
+                               }
+                       }
+                       
+                       public override void  Close()
+                       {
+                               lock (mutex)
+                               {
+                                       cache.Close();
+                               }
+                       }
+                       
+                       internal override Cache GetSynchronizedCache()
+                       {
+                               return this;
+                       }
+               }
+               
+               /// <summary> Returns a thread-safe cache backed by the specified cache. 
+               /// In order to guarantee thread-safety, all access to the backed cache must
+               /// be accomplished through the returned cache.
+               /// </summary>
+               public static Cache SynchronizedCache(Cache cache)
+               {
+                       return cache.GetSynchronizedCache();
+               }
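+
+               // Typical use (a sketch; SimpleMapCache is one concrete subclass):
+               //
+               //   Cache shared = Cache.SynchronizedCache(new SimpleMapCache());
+               //   shared.Put("key", someValue);   // every call locks the shared mutex
+               //   object v = shared.Get("key");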
+               
+               /// <summary> Called by {@link #SynchronizedCache(Cache)}. This method
+               /// returns a {@link SynchronizedCache} instance that wraps
+               /// this instance by default and can be overridden to return
+               /// e. g. subclasses of {@link SynchronizedCache} or this
+               /// in case this cache is already synchronized.
+               /// </summary>
+               internal virtual Cache GetSynchronizedCache()
+               {
+                       return new SynchronizedCache_Renamed_Class(this);
+               }
+               
+               /// <summary> Puts a (key, value)-pair into the cache. </summary>
+               public abstract void  Put(System.Object key, System.Object value_Renamed);
+               
+               /// <summary> Returns the value for the given key. </summary>
+               public abstract System.Object Get(System.Object key);
+               
+               /// <summary> Returns whether the given key is in this cache. </summary>
+               public abstract bool ContainsKey(System.Object key);
+               
+               /// <summary> Closes the cache.</summary>
+               public abstract void  Close();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Cache/SimpleLRUCache.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Cache/SimpleLRUCache.cs
new file mode 100644 (file)
index 0000000..e0bebca
--- /dev/null
@@ -0,0 +1,165 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using System.Collections.Generic;
+
+namespace Mono.Lucene.Net.Util.Cache
+{
+    public class SimpleLRUCache : SimpleMapCache
+    {
+        /// <summary>
+        /// The maximum number of items to cache.
+        /// </summary>
+        private int capacity;
+
+        /// <summary>
+        /// The list to efficiently maintain the LRU state.
+        /// </summary>
+        private LinkedList<ListValueEntry> list;
+
+        /// <summary>
+        /// The dictionary to hash into any location in the list.
+        /// </summary>
+        private Dictionary<object, LinkedListNode<ListValueEntry>> lookup;
+
+        /// <summary>
+        /// The node instance to use/re-use when adding an item to the cache.
+        /// </summary>
+        private LinkedListNode<ListValueEntry> openNode;
+
+        public SimpleLRUCache(int Capacity)
+        {
+            this.capacity = Capacity;
+            this.list = new LinkedList<ListValueEntry>();
+            this.lookup = new Dictionary<object, LinkedListNode<ListValueEntry>>(Capacity + 1);
+            this.openNode = new LinkedListNode<ListValueEntry>(new ListValueEntry(null, null));
+        }
+
+        public override void Put(object Key, object Value)
+        {
+            if (Get(Key) == null)
+            {
+                this.openNode.Value.ItemKey = Key;
+                this.openNode.Value.ItemValue = Value;
+                this.list.AddFirst(this.openNode);
+                this.lookup.Add(Key, this.openNode);
+
+                if (this.list.Count > this.capacity)
+                {
+                    // last node is to be removed and saved for the next addition to the cache
+                    this.openNode = this.list.Last;
+
+                    // remove from list & dictionary
+                    this.list.RemoveLast();
+                    this.lookup.Remove(this.openNode.Value.ItemKey);
+                }
+                else
+                {
+                    // still filling the cache, create a new open node for the next time
+                    this.openNode = new LinkedListNode<ListValueEntry>(new ListValueEntry(null, null));
+                }
+            }
+        }
+
+        public override object Get(object Key)
+        {
+            LinkedListNode<ListValueEntry> node = null;
+            if(!this.lookup.TryGetValue(Key, out node))
+            {
+                return null;
+            }
+            this.list.Remove(node);
+            this.list.AddFirst(node);
+            return node.Value.ItemValue;
+        }
+
+        /// <summary>
+        /// Container to hold the key and value to aid in removal from 
+        /// the <see cref="lookup"/> dictionary when an item is removed from cache.
+        /// </summary>
+        class ListValueEntry
+        {
+            internal object ItemValue;
+            internal object ItemKey;
+
+            internal ListValueEntry(object key, object value)
+            {
+                this.ItemKey = key;
+                this.ItemValue = value;
+            }
+        }
+    }
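+
+    // Illustrative eviction order (a sketch): Get() moves an entry to the
+    // front of the LRU list, so the least recently used entry is evicted,
+    // not the oldest inserted one.
+    //
+    //   SimpleLRUCache lru = new SimpleLRUCache(2);
+    //   lru.Put("a", 1);
+    //   lru.Put("b", 2);
+    //   lru.Get("a");        // "a" becomes most recently used
+    //   lru.Put("c", 3);     // capacity exceeded: evicts "b"
+    //   // lru.Get("b") == null, lru.Get("a") != null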
+
+
+#region NOT_USED_FROM_JLCA_PORT
+/*
+  
+ //
+ // This is the original port as it was generated via JLCA.
+ // This code is not used.  It's here for reference only.
+ //
+  
+
+       /// <summary> Simple LRU cache implementation that uses a LinkedHashMap.
+       /// This cache is not synchronized, use {@link Cache#SynchronizedCache(Cache)}
+       /// if needed.
+       /// 
+       /// </summary>
+       public class SimpleLRUCache:SimpleMapCache
+       {
+               private class AnonymousClassLinkedHashMap : LinkedHashMap
+               {
+                       public AnonymousClassLinkedHashMap(SimpleLRUCache enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(SimpleLRUCache enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private SimpleLRUCache enclosingInstance;
+                       public SimpleLRUCache Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       protected internal virtual bool RemoveEldestEntry(System.Collections.DictionaryEntry eldest)
+                       {
+                               return size() > Enclosing_Instance.cacheSize;
+                       }
+               }
+               private const float LOADFACTOR = 0.75f;
+               
+               private int cacheSize;
+               
+               /// <summary> Creates a last-recently-used cache with the specified size. </summary>
+               public SimpleLRUCache(int cacheSize):base(null)
+               {
+                       this.cacheSize = cacheSize;
+                       int capacity = (int) System.Math.Ceiling(cacheSize / LOADFACTOR) + 1;
+                       
+                       base.map = new AnonymousClassLinkedHashMap(this, capacity, LOADFACTOR, true);
+               }
+       }
+*/
+#endregion
+
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Cache/SimpleMapCache.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Cache/SimpleMapCache.cs
new file mode 100644 (file)
index 0000000..f3d5868
--- /dev/null
@@ -0,0 +1,128 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util.Cache
+{
+       
+       /// <summary> Simple cache implementation that uses a HashMap to store (key, value) pairs.
+       /// This cache is not synchronized, use {@link Cache#SynchronizedCache(Cache)}
+       /// if needed.
+       /// </summary>
+       public class SimpleMapCache:Cache
+       {
+               internal System.Collections.IDictionary map;
+               
+               public SimpleMapCache():this(new System.Collections.Hashtable())
+               {
+               }
+               
+               public SimpleMapCache(System.Collections.IDictionary map)
+               {
+                       this.map = map;
+               }
+               
+               public override System.Object Get(System.Object key)
+               {
+                       return map[key];
+               }
+               
+               public override void  Put(System.Object key, System.Object value_Renamed)
+               {
+                       map[key] = value_Renamed;
+               }
+               
+               public override void  Close()
+               {
+                       // NOOP
+               }
+               
+               public override bool ContainsKey(System.Object key)
+               {
+                       return map.Contains(key);
+               }
+               
+               /// <summary> Returns a Set containing all keys in this cache.</summary>
+               public virtual System.Collections.ICollection KeySet()
+               {
+                       return map.Keys;
+               }
+               
+               internal override Cache GetSynchronizedCache()
+               {
+                       return new SynchronizedSimpleMapCache(this);
+               }
+               
+               private class SynchronizedSimpleMapCache:SimpleMapCache
+               {
+                       internal System.Object mutex;
+                       internal SimpleMapCache cache;
+                       
+                       internal SynchronizedSimpleMapCache(SimpleMapCache cache)
+                       {
+                               this.cache = cache;
+                               this.mutex = this;
+                       }
+                       
+                       public override void  Put(System.Object key, System.Object value_Renamed)
+                       {
+                               lock (mutex)
+                               {
+                                       cache.Put(key, value_Renamed);
+                               }
+                       }
+                       
+                       public override System.Object Get(System.Object key)
+                       {
+                               lock (mutex)
+                               {
+                                       return cache.Get(key);
+                               }
+                       }
+                       
+                       public override bool ContainsKey(System.Object key)
+                       {
+                               lock (mutex)
+                               {
+                                       return cache.ContainsKey(key);
+                               }
+                       }
+                       
+                       public override void  Close()
+                       {
+                               lock (mutex)
+                               {
+                                       cache.Close();
+                               }
+                       }
+                       
+                       public override System.Collections.ICollection KeySet()
+                       {
+                               lock (mutex)
+                               {
+                                       return cache.KeySet();
+                               }
+                       }
+                       
+                       internal override Cache GetSynchronizedCache()
+                       {
+                               return this;
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/CloseableThreadLocal.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/CloseableThreadLocal.cs
new file mode 100644 (file)
index 0000000..afa95b0
--- /dev/null
@@ -0,0 +1,181 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using System.Collections.Generic;
+using System.Threading;
+
+namespace Mono.Lucene.Net.Util
+{
+
+    /// <summary>Java's builtin ThreadLocal has a serious flaw:
+    /// it can take an arbitrarily long amount of time to
+    /// dereference the things you had stored in it, even once the
+    /// ThreadLocal instance itself is no longer referenced.
+    /// This is because there is a single, master map stored for
+    /// each thread, which all ThreadLocals share, and that
+    /// master map only periodically purges "stale" entries.
+    /// 
+    /// While not technically a memory leak, because eventually
+    /// the memory will be reclaimed, it can take a long time
+    /// and you can easily hit OutOfMemoryError because from the
+    /// GC's standpoint the stale entries are not reclaimable.
+    /// 
+    /// This class works around that, by only enrolling
+    /// WeakReference values into the ThreadLocal, and
+    /// separately holding a hard reference to each stored
+    /// value.  When you call {@link #close}, these hard
+    /// references are cleared and then GC is freely able to
+    /// reclaim the space used by the objects stored in it. 
+    /// </summary>
+    /// 
+
+    public class CloseableThreadLocal
+    {
+        private ThreadLocal<WeakReference> t = new ThreadLocal<WeakReference>();
+
+        private Dictionary<Thread, object> hardRefs = new Dictionary<Thread, object>();
+
+
+        public virtual object InitialValue()
+        {
+            return null;
+        }
+
+        public virtual object Get()
+        {
+            WeakReference weakRef = t.Get();
+            if (weakRef == null)
+            {
+                object iv = InitialValue();
+                if (iv != null)
+                {
+                    Set(iv);
+                    return iv;
+                }
+                else
+                    return null;
+            }
+            else
+            {
+                return weakRef.Get();
+            }
+        }
+
+        public virtual void Set(object @object)
+        {
+            //+-- For Debugging
+            if (SupportClass.CloseableThreadLocalProfiler.EnableCloseableThreadLocalProfiler == true)
+            {
+                lock (SupportClass.CloseableThreadLocalProfiler.Instances)
+                {
+                    SupportClass.CloseableThreadLocalProfiler.Instances.Add(new WeakReference(@object));
+                }
+            }
+            //+--
+
+            t.Set(new WeakReference(@object));
+
+            lock (hardRefs)
+            {
+                //hardRefs[Thread.CurrentThread] = @object;
+                hardRefs.Add(Thread.CurrentThread, @object);
+
+                // Purge dead threads
+                foreach (var thread in new List<Thread>(hardRefs.Keys))
+                {
+                    if (!thread.IsAlive)
+                        hardRefs.Remove(thread);
+                }
+
+            }
+        }
+
+        public virtual void Close()
+        {
+            // Clear the hard refs; then, the only remaining refs to
+            // all values we were storing are weak (unless somewhere
+            // else is still using them) and so GC may reclaim them:
+            hardRefs = null;
+            // Take care of the current thread right now; others will be
+            // taken care of via the WeakReferences.
+            if (t != null)
+            {
+                t.Remove();
+            }
+            t = null;
+        }
+    }
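+
+    // Illustrative lifecycle (a sketch): each thread sees only its own value,
+    // held alive through `hardRefs` until Close() releases everything at once.
+    //
+    //   CloseableThreadLocal tl = new CloseableThreadLocal();
+    //   tl.Set(new System.Text.StringBuilder());
+    //   object mine = tl.Get();   // this thread's value (or InitialValue())
+    //   tl.Close();               // hard refs dropped; GC can reclaim values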
+
+    internal static class CloseableThreadLocalExtensions
+    {
+        public static void Set<T>(this ThreadLocal<T> t, T val)
+        {
+            t.Value = val;
+        }
+
+        public static T Get<T>(this ThreadLocal<T> t)
+        {
+            return t.Value;
+        }
+
+        public static void Remove<T>(this ThreadLocal<T> t)
+        {
+            t.Dispose();
+        }
+
+        public static object Get(this WeakReference w)
+        {
+            return w.Target;
+        }
+    }
+
+    //// {{DIGY}}
+    //// To compile against Framework 2.0
+    //// Uncomment below class
+#if NET_2_0
+    public class ThreadLocal<T> : IDisposable
+    {
+       [ThreadStatic]
+       static SupportClass.WeakHashTable slots;
+
+       void Init()
+       {
+           if (slots == null) slots = new SupportClass.WeakHashTable();
+       }
+
+       public T Value
+       {
+           set
+           {
+               Init();
+               slots.Add(this, value);
+           }
+           get
+           {
+               Init();
+               return (T)slots[this];
+           }
+       }
+
+       public void Dispose()
+       {
+           slots.Remove(this);
+       }
+    }
+#endif
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Constants.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Constants.cs
new file mode 100644 (file)
index 0000000..25ccf56
--- /dev/null
@@ -0,0 +1,114 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using LucenePackage = Mono.Lucene.Net.LucenePackage;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> Some useful constants.
+       /// 
+       /// 
+       /// </summary>
+       /// <version>  $Id: Constants.java 828327 2009-10-22 06:47:40Z uschindler $
+       /// 
+       /// </version>
+       
+       public sealed class Constants
+       {
+               private Constants()
+               {
+               } // can't construct
+               
+               /// <summary>The value of <tt>System.getProperty("java.version")</tt>. *</summary>
+               public static readonly System.String JAVA_VERSION = SupportClass.AppSettings.Get("java.version", "");
+               /// <summary>True iff this is Java version 1.1. </summary>
+               public static readonly bool JAVA_1_1 = JAVA_VERSION.StartsWith("1.1.");
+               /// <summary>True iff this is Java version 1.2. </summary>
+               public static readonly bool JAVA_1_2 = JAVA_VERSION.StartsWith("1.2.");
+               /// <summary>True iff this is Java version 1.3. </summary>
+               public static readonly bool JAVA_1_3 = JAVA_VERSION.StartsWith("1.3.");
+               
+               /// <summary>The value of <tt>System.getProperty("os.name")</tt>. *</summary>
+               public static readonly System.String OS_NAME = GetEnvironmentVariable("OS","Windows_NT") ?? "Linux";
+               /// <summary>True iff running on Linux. </summary>
+               public static readonly bool LINUX = OS_NAME.StartsWith("Linux");
+               /// <summary>True iff running on Windows. </summary>
+               public static readonly bool WINDOWS = OS_NAME.StartsWith("Windows");
+               /// <summary>True iff running on SunOS. </summary>
+               public static readonly bool SUN_OS = OS_NAME.StartsWith("SunOS");
+               
+               public static readonly System.String OS_ARCH = GetEnvironmentVariable("PROCESSOR_ARCHITECTURE","x86");
+        public static readonly System.String OS_VERSION = GetEnvironmentVariable("OS_VERSION", "?");
+               public static readonly System.String JAVA_VENDOR = SupportClass.AppSettings.Get("java.vendor", "");
+               
+               // NOTE: this logic may not be correct; if you know of a
+               // more reliable approach please raise it on java-dev!
+               public static bool JRE_IS_64BIT;
+
+        // this method prevents inlining the final version constant in compiled
+        // classes,
+        // see: http://www.javaworld.com/community/node/3400
+        private static System.String Ident(System.String s)
+        {
+            return s.ToString();
+        }
+
+               public static readonly System.String LUCENE_MAIN_VERSION = Ident("2.9.4");
+               
+               public static System.String LUCENE_VERSION="8.8.8.8";
+               static Constants()
+               {
+            if (IntPtr.Size == 8)
+            {
+                JRE_IS_64BIT = true;// 64 bit machine
+            }
+            else if (IntPtr.Size == 4)
+            {
+                JRE_IS_64BIT = false;// 32 bit machine
+            }
+
+            try
+            {
+                LUCENE_VERSION = System.Reflection.Assembly.GetExecutingAssembly().GetName().Version.ToString();
+            }
+            catch (System.Security.SecurityException) //Ignore in medium trust.
+            {
+            }
+
+        }
+
+        #region MEDIUM-TRUST Support
+        static string GetEnvironmentVariable(string variable, string defaultValueOnSecurityException)
+        {
+            try
+            {
+                if (variable == "OS_VERSION") return System.Environment.OSVersion.ToString();
+
+                return System.Environment.GetEnvironmentVariable(variable);
+            }
+            catch (System.Security.SecurityException)
+            {
+                return defaultValueOnSecurityException;
+            }
+
+        }
+        #endregion
+    }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/DocIdBitSet.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/DocIdBitSet.cs
new file mode 100644 (file)
index 0000000..d8c5d01
--- /dev/null
@@ -0,0 +1,114 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using DocIdSet = Mono.Lucene.Net.Search.DocIdSet;
+using DocIdSetIterator = Mono.Lucene.Net.Search.DocIdSetIterator;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       
+       /// <summary>Simple DocIdSet and DocIdSetIterator backed by a BitSet </summary>
+       public class DocIdBitSet:DocIdSet
+       {
+               private System.Collections.BitArray bitSet;
+               
+               public DocIdBitSet(System.Collections.BitArray bitSet)
+               {
+                       this.bitSet = bitSet;
+               }
+               
+               public override DocIdSetIterator Iterator()
+               {
+                       return new DocIdBitSetIterator(bitSet);
+               }
+
+               /// <summary>This DocIdSet implementation is cacheable.</summary>
+               public override bool IsCacheable()
+               {
+                       return true;
+               }
+               
+               /// <summary> Returns the underlying BitSet. </summary>
+               public virtual System.Collections.BitArray GetBitSet()
+               {
+                       return this.bitSet;
+               }
+               
+               private class DocIdBitSetIterator:DocIdSetIterator
+               {
+                       private int docId;
+                       private System.Collections.BitArray bitSet;
+                       
+                       internal DocIdBitSetIterator(System.Collections.BitArray bitSet)
+                       {
+                               this.bitSet = bitSet;
+                               this.docId = - 1;
+                       }
+                       
+                       /// <deprecated> use {@link #DocID()} instead. 
+                       /// </deprecated>
+            [Obsolete("use DocID() instead.")]
+                       public override int Doc()
+                       {
+                               System.Diagnostics.Debug.Assert(docId != - 1);
+                               return docId;
+                       }
+                       
+                       public override int DocID()
+                       {
+                               return docId;
+                       }
+                       
+                       /// <deprecated> use {@link #NextDoc()} instead. 
+                       /// </deprecated>
+            [Obsolete("use NextDoc() instead.")]
+                       public override bool Next()
+                       {
+                               // (docId + 1) on next line requires -1 initial value for docNr:
+                               return NextDoc() != NO_MORE_DOCS;
+                       }
+                       
+                       public override int NextDoc()
+                       {
+                               // (docId + 1) on next line requires -1 initial value for docNr:
+                               int d = SupportClass.BitSetSupport.NextSetBit(bitSet, docId + 1);
+                               // -1 returned by BitSet.nextSetBit() when exhausted
+                               docId = d == - 1?NO_MORE_DOCS:d;
+                               return docId;
+                       }
+                       
+                       /// <deprecated> use {@link #Advance(int)} instead. 
+                       /// </deprecated>
+            [Obsolete("use Advance(int) instead.")]
+                       public override bool SkipTo(int skipDocNr)
+                       {
+                               return Advance(skipDocNr) != NO_MORE_DOCS;
+                       }
+                       
+                       public override int Advance(int target)
+                       {
+                               int d = SupportClass.BitSetSupport.NextSetBit(bitSet, target);
+                               // -1 returned by BitSet.nextSetBit() when exhausted
+                               docId = d == - 1?NO_MORE_DOCS:d;
+                               return docId;
+                       }
+               }
+       }
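+
+       // Illustrative iteration (a sketch): walk the set bits as doc ids until
+       // NO_MORE_DOCS, per the NextDoc() contract above.
+       //
+       //   System.Collections.BitArray bits = new System.Collections.BitArray(8);
+       //   bits.Set(2, true);
+       //   bits.Set(5, true);
+       //   DocIdSetIterator it = new DocIdBitSet(bits).Iterator();
+       //   while (it.NextDoc() != DocIdSetIterator.NO_MORE_DOCS)
+       //       System.Console.WriteLine(it.DocID());   // prints 2, then 5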
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/FieldCacheSanityChecker.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/FieldCacheSanityChecker.cs
new file mode 100644 (file)
index 0000000..b10fbf2
--- /dev/null
@@ -0,0 +1,444 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using System.Collections.Generic;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+using FieldCache = Mono.Lucene.Net.Search.FieldCache;
+using CacheEntry = Mono.Lucene.Net.Search.CacheEntry;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> Provides methods for sanity checking that entries in the FieldCache 
+       /// are not wasteful or inconsistent.
+       /// <p/>
+       /// <p/>
+       /// Lucene 2.9 Introduced numerous enhancements into how the FieldCache 
+       /// is used by the low levels of Lucene searching (for Sorting and 
+       /// ValueSourceQueries) to improve both the speed for Sorting, as well 
+       /// as reopening of IndexReaders.  But these changes have shifted the 
+       /// usage of FieldCache from "top level" IndexReaders (frequently a 
+       /// MultiReader or DirectoryReader) down to the leaf level SegmentReaders.  
+       /// As a result, existing applications that directly access the FieldCache 
+       /// may find RAM usage increase significantly when upgrading to 2.9 or 
+       /// later.  This class provides an API for these applications (or their 
+       /// Unit tests) to check at run time if the FieldCache contains "insane" 
+       /// usages of the FieldCache.
+       /// <p/>
+       /// <p/>
+       /// <b>EXPERIMENTAL API:</b> This API is considered extremely advanced and 
+       /// experimental.  It may be removed or altered w/o warning in future releases 
+       /// of Lucene.
+       /// <p/>
+       /// </summary>
+       /// <seealso cref="FieldCache">
+       /// </seealso>
+       /// <seealso cref="FieldCacheSanityChecker.Insanity">
+       /// </seealso>
+       /// <seealso cref="FieldCacheSanityChecker.InsanityType">
+       /// </seealso>
+       public sealed class FieldCacheSanityChecker
+       {
+               
+               private RamUsageEstimator ramCalc = null;
+               public FieldCacheSanityChecker()
+               {
+                       /* NOOP */
+               }
+               /// <summary> If set, will be used to estimate size for all CacheEntry objects 
+               /// dealt with.
+               /// </summary>
+               public void  SetRamUsageEstimator(RamUsageEstimator r)
+               {
+                       ramCalc = r;
+               }
+               
+               
+               /// <summary> Quick and dirty convenience method</summary>
+               /// <seealso cref="check">
+               /// </seealso>
+               public static Insanity[] CheckSanity(FieldCache cache)
+               {
+                       return CheckSanity(cache.GetCacheEntries());
+               }
+               
+               /// <summary> Quick and dirty convenience method that instantiates an instance with 
+               /// "good defaults" and uses it to test the CacheEntry[]
+               /// </summary>
+               /// <seealso cref="check">
+               /// </seealso>
+               public static Insanity[] CheckSanity(CacheEntry[] cacheEntries)
+               {
+                       FieldCacheSanityChecker sanityChecker = new FieldCacheSanityChecker();
+                       // doesn't check for interned
+                       sanityChecker.SetRamUsageEstimator(new RamUsageEstimator(false));
+                       return sanityChecker.Check(cacheEntries);
+               }
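+
+               // Typical test-time use (a sketch; `cache` is whatever FieldCache
+               // instance the application holds):
+               //
+               //   Insanity[] problems = FieldCacheSanityChecker.CheckSanity(cache);
+               //   foreach (Insanity insane in problems)
+               //       System.Console.Error.WriteLine(insane.ToString());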
+               
+               
+               /// <summary> Tests a CacheEntry[] for indication of "insane" cache usage.
+               /// <p/>
+               /// NOTE: FieldCache CreationPlaceholder objects are ignored.
+               /// (:TODO: is this a bad idea? are we masking a real problem?)
+               /// <p/>
+               /// </summary>
+               public Insanity[] Check(CacheEntry[] cacheEntries)
+               {
+                       if (null == cacheEntries || 0 == cacheEntries.Length)
+                               return new Insanity[0];
+                       
+                       if (null != ramCalc)
+                       {
+                               for (int i = 0; i < cacheEntries.Length; i++)
+                               {
+                                       cacheEntries[i].EstimateSize(ramCalc);
+                               }
+                       }
+                       
+                       // the indirect mapping lets MapOfSet dedup identical valIds for us
+                       //
+                       // maps the (valId) identityhashCode of cache values to 
+                       // sets of CacheEntry instances
+                       MapOfSets<int,CacheEntry> valIdToItems = new MapOfSets<int,CacheEntry>(new Dictionary<int,Dictionary<CacheEntry,CacheEntry>>(17));
+                       // maps ReaderField keys to Sets of ValueIds
+                       MapOfSets<ReaderField,int> readerFieldToValIds = new MapOfSets<ReaderField,int>(new Dictionary<ReaderField,Dictionary<int,int>>(17));
+                       //
+                       
+                       // any keys that we know result in more than one valId
+            // TODO: This will be a HashSet<T> when we start using .NET Framework 3.5
+            Dictionary<ReaderField, ReaderField> valMismatchKeys = new Dictionary<ReaderField, ReaderField>();
+                       
+                       // iterate over all the cacheEntries to get the mappings we'll need
+                       for (int i = 0; i < cacheEntries.Length; i++)
+                       {
+                               CacheEntry item = cacheEntries[i];
+                               System.Object val = item.GetValue();
+                               
+                               if (val is Mono.Lucene.Net.Search.CreationPlaceholder)
+                                       continue;
+                               
+                               ReaderField rf = new ReaderField(item.GetReaderKey(), item.GetFieldName());
+                               
+                               System.Int32 valId = val.GetHashCode();
+                               
+                               // indirect mapping, so the MapOfSet will dedup identical valIds for us
+                               valIdToItems.Put(valId, item);
+                               if (1 < readerFieldToValIds.Put(rf, valId))
+                               {
+                    if (!valMismatchKeys.ContainsKey(rf))
+                    {
+                        valMismatchKeys.Add(rf, rf);
+                    }
+                               }
+                       }
+                       
+                       List<Insanity> insanity = new List<Insanity>(valMismatchKeys.Count * 3);
+                       
+                       insanity.AddRange(CheckValueMismatch(valIdToItems, readerFieldToValIds, valMismatchKeys));
+                       insanity.AddRange(CheckSubreaders(valIdToItems, readerFieldToValIds));
+                       
+                       return insanity.ToArray();
+               }
+               
+               /// <summary> Internal helper method used by check that iterates over 
+               /// valMismatchKeys and generates a Collection of Insanity 
+               /// instances accordingly.  The MapOfSets are used to populate 
+               /// the Insanity objects. 
+               /// </summary>
+               /// <seealso cref="InsanityType.VALUEMISMATCH">
+               /// </seealso>
+               private List<Insanity> CheckValueMismatch(MapOfSets<int,CacheEntry> valIdToItems, MapOfSets<ReaderField,int> readerFieldToValIds, Dictionary<ReaderField,ReaderField> valMismatchKeys)
+               {
+                       
+                       List<Insanity> insanity = new List<Insanity>(valMismatchKeys.Count * 3);
+                       
+                       if (!(valMismatchKeys.Count == 0))
+                       {
+                               // we have multiple values for some ReaderFields
+                               
+                IDictionary<ReaderField,Dictionary<int,int>> rfMap = readerFieldToValIds.GetMap();
+                IDictionary<int,Dictionary<CacheEntry,CacheEntry>> valMap = valIdToItems.GetMap();
+                foreach (ReaderField rf in valMismatchKeys.Keys)
+                {
+                    List<CacheEntry> badEntries = new List<CacheEntry>(valMismatchKeys.Count * 2);
+                    foreach (int val in rfMap[rf].Keys)
+                    {
+                        foreach (CacheEntry entry in valMap[val].Keys)
+                        {
+                            badEntries.Add(entry);
+                        }
+                    }
+
+                    insanity.Add(new Insanity(InsanityType.VALUEMISMATCH, "Multiple distinct value objects for " + rf.ToString(), badEntries.ToArray()));
+                }
+            }
+                       return insanity;
+               }
+               
+               /// <summary> Internal helper method used by check that iterates over 
+               /// the keys of readerFieldToValIds and generates a Collection 
+               /// of Insanity instances whenever two (or more) ReaderField instances are 
+               /// found that have an ancestry relationship.  
+               /// 
+               /// </summary>
+               /// <seealso cref="InsanityType.SUBREADER">
+               /// </seealso>
+               private List<Insanity> CheckSubreaders(MapOfSets<int,CacheEntry> valIdToItems, MapOfSets<ReaderField,int> readerFieldToValIds)
+               {
+                       
+            List<Insanity> insanity = new List<Insanity>(23);
+
+            Dictionary<ReaderField, Dictionary<ReaderField, ReaderField>> badChildren = new Dictionary<ReaderField, Dictionary<ReaderField, ReaderField>>(17);
+                       MapOfSets<ReaderField, ReaderField> badKids = new MapOfSets<ReaderField, ReaderField>(badChildren); // wrapper
+
+            IDictionary<int, Dictionary<CacheEntry, CacheEntry>> viToItemSets = valIdToItems.GetMap();
+            IDictionary<ReaderField, Dictionary<int, int>> rfToValIdSets = readerFieldToValIds.GetMap();
+
+            Dictionary<ReaderField, ReaderField> seen = new Dictionary<ReaderField, ReaderField>(17);
+
+            foreach (ReaderField rf in rfToValIdSets.Keys)
+            {
+                if (seen.ContainsKey(rf))
+                    continue;
+
+                System.Collections.IList kids = GetAllDecendentReaderKeys(rf.readerKey);
+                               for (int i = 0; i < kids.Count; i++)
+                               {
+                                       ReaderField kid = new ReaderField(kids[i], rf.fieldName);
+
+                                       if (badChildren.ContainsKey(kid))
+                                       {
+                                               // we've already processed this kid as RF and found other problems
+                                               // track those problems as our own
+                                               badKids.Put(rf, kid);
+                                               badKids.PutAll(rf, badChildren[kid]);
+                                               badChildren.Remove(kid);
+                                       }
+                                       else if (rfToValIdSets.ContainsKey(kid))
+                                       {
+                                               // we have cache entries for the kid
+                                               badKids.Put(rf, kid);
+                                       }
+                    if (!seen.ContainsKey(kid))
+                    {
+                        seen.Add(kid, kid);
+                    }
+                               }
+                if (!seen.ContainsKey(rf))
+                {
+                    seen.Add(rf, rf);
+                }
+                       }
+                       
+                       // every mapping in badKids represents an Insanity
+                       foreach (ReaderField parent in badChildren.Keys)
+                       {
+                               Dictionary<ReaderField,ReaderField> kids = badChildren[parent];
+                               
+                               List<CacheEntry> badEntries = new List<CacheEntry>(kids.Count * 2);
+                               
+                               // put parent entr(ies) in first
+                               {
+                                       foreach (int val in rfToValIdSets[parent].Keys)
+                                       {
+                                               badEntries.AddRange(viToItemSets[val].Keys);
+                                       }
+                               }
+                               
+                               // now the entries for the descendants
+                               foreach (ReaderField kid in kids.Keys)
+                               {
+                                       foreach (int val in rfToValIdSets[kid].Keys)
+                                       {
+                                               badEntries.AddRange(viToItemSets[val].Keys);
+                                       }
+                               }
+                               
+                               insanity.Add(new Insanity(InsanityType.SUBREADER, "Found caches for decendents of " + parent.ToString(), badEntries.ToArray()));
+                       }
+                       
+                       return insanity;
+               }
+               
+               /// <summary> Checks if the seed is an IndexReader, and if so will walk
+               /// the hierarchy of subReaders building up a list of the objects 
+               /// returned by obj.getFieldCacheKey()
+               /// </summary>
+               private System.Collections.IList GetAllDecendentReaderKeys(System.Object seed)
+               {
+                       System.Collections.IList all = new System.Collections.ArrayList(17); // will grow as we iter
+                       all.Add(seed);
+                       for (int i = 0; i < all.Count; i++)
+                       {
+                               System.Object obj = all[i];
+                               if (obj is IndexReader)
+                               {
+                                       IndexReader[] subs = ((IndexReader) obj).GetSequentialSubReaders();
+                                       for (int j = 0; (null != subs) && (j < subs.Length); j++)
+                                       {
+                                               all.Add(subs[j].GetFieldCacheKey());
+                                       }
+                               }
+                       }
+                       // need to skip the first, because it was the seed
+                       return (System.Collections.IList) ((System.Collections.ArrayList) all).GetRange(1, all.Count - 1);
+               }
+               
+               /// <summary> Simple pair object for using "readerKey + fieldName" as a Map key</summary>
+               private sealed class ReaderField
+               {
+                       public System.Object readerKey;
+                       public System.String fieldName;
+                       public ReaderField(System.Object readerKey, System.String fieldName)
+                       {
+                               this.readerKey = readerKey;
+                               this.fieldName = fieldName;
+                       }
+                       public override int GetHashCode()
+                       {
+                               return readerKey.GetHashCode() * fieldName.GetHashCode();
+                       }
+                       public  override bool Equals(System.Object that)
+                       {
+                               if (!(that is ReaderField))
+                                       return false;
+                               
+                               ReaderField other = (ReaderField) that;
+                               return (this.readerKey == other.readerKey && this.fieldName.Equals(other.fieldName));
+                       }
+                       public override System.String ToString()
+                       {
+                               return readerKey.ToString() + "+" + fieldName;
+                       }
+               }
+               
+               /// <summary> Simple container for a collection of related CacheEntry objects that 
+               /// in conjunction with each other represent some "insane" usage of the 
+               /// FieldCache.
+               /// </summary>
+               public sealed class Insanity
+               {
+                       private InsanityType type;
+                       private System.String msg;
+                       private CacheEntry[] entries;
+                       public Insanity(InsanityType type, System.String msg, CacheEntry[] entries)
+                       {
+                               if (null == type)
+                               {
+                                       throw new System.ArgumentException("Insanity requires non-null InsanityType");
+                               }
+                               if (null == entries || 0 == entries.Length)
+                               {
+                                       throw new System.ArgumentException("Insanity requires non-null/non-empty CacheEntry[]");
+                               }
+                               this.type = type;
+                               this.msg = msg;
+                               this.entries = entries;
+                       }
+                       /// <summary> Type of insane behavior this object represents</summary>
+                       public new InsanityType GetType()
+                       {
+                               return type;
+                       }
+                       /// <summary> Description of the insane behavior</summary>
+                       public System.String GetMsg()
+                       {
+                               return msg;
+                       }
+                       /// <summary> CacheEntry objects which suggest a problem</summary>
+                       public CacheEntry[] GetCacheEntries()
+                       {
+                               return entries;
+                       }
+                       /// <summary> Multi-line representation of this Insanity object, starting with 
+                       /// the Type and Msg, followed by each CacheEntry.ToString() on its 
+                       /// own line prefaced by a tab character
+                       /// </summary>
+                       public override System.String ToString()
+                       {
+                               System.Text.StringBuilder buf = new System.Text.StringBuilder();
+                               buf.Append(GetType()).Append(": ");
+                               
+                               System.String m = GetMsg();
+                               if (null != m)
+                                       buf.Append(m);
+                               
+                               buf.Append('\n');
+                               
+                               CacheEntry[] ce = GetCacheEntries();
+                               for (int i = 0; i < ce.Length; i++)
+                               {
+                                       buf.Append('\t').Append(ce[i].ToString()).Append('\n');
+                               }
+                               
+                               return buf.ToString();
+                       }
+               }
+               
+               /// <summary> An enumeration of the different types of "insane" behavior that 
+               /// may be detected in a FieldCache.
+               /// 
+               /// </summary>
+               /// <seealso cref="InsanityType.SUBREADER">
+               /// </seealso>
+               /// <seealso cref="InsanityType.VALUEMISMATCH">
+               /// </seealso>
+               /// <seealso cref="InsanityType.EXPECTED">
+               /// </seealso>
+               public sealed class InsanityType
+               {
+                       private System.String label;
+                       internal InsanityType(System.String label)
+                       {
+                               this.label = label;
+                       }
+                       public override System.String ToString()
+                       {
+                               return label;
+                       }
+                       
+                       /// <summary> Indicates an overlap in cache usage on a given field 
+                       /// in sub/super readers.
+                       /// </summary>
+                       public static readonly InsanityType SUBREADER = new InsanityType("SUBREADER");
+                       
+                       /// <summary> <p/>
+                       /// Indicates entries have the same reader+fieldname but 
+                       /// different cached values.  This can happen if different datatypes 
+                       /// or parsers are used -- and while it's not necessarily a bug 
+                       /// it's typically an indication of a possible problem.
+                       /// <p/>
+                       /// <p/>
+                       /// NOTE: Only the reader, fieldname, and cached value are actually 
+                       /// tested -- if two cache entries have different parsers or datatypes but 
+                       /// the cached values are the same Object (== not just Equals()) this method 
+                       /// does not consider that a red flag.  This allows for subtle variations 
+                       /// in the way a Parser is specified (null vs DEFAULT_LONG_PARSER, etc...)
+                       /// <p/>
+                       /// </summary>
+                       public static readonly InsanityType VALUEMISMATCH = new InsanityType("VALUEMISMATCH");
+                       
+                       /// <summary> Indicates an expected bit of "insanity".  This may be useful for 
+                       /// clients that wish to preserve/log information about insane usage 
+                       /// but indicate that it was expected. 
+                       /// </summary>
+                       public static readonly InsanityType EXPECTED = new InsanityType("EXPECTED");
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/IndexableBinaryStringTools.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/IndexableBinaryStringTools.cs
new file mode 100644 (file)
index 0000000..06545d8
--- /dev/null
@@ -0,0 +1,341 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+// {{Aroush-2.9}} Port issue?  Both of those were treated as: System.IO.MemoryStream
+//using CharBuffer = java.nio.CharBuffer;
+//using ByteBuffer = java.nio.ByteBuffer;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> Provides support for converting byte sequences to Strings and back again.
+       /// The resulting Strings preserve the original byte sequences' sort order.
+       /// 
+       /// The Strings are constructed using a Base 8000h encoding of the original
+       /// binary data - each char of an encoded String represents a 15-bit chunk
+       /// from the byte sequence.  Base 8000h was chosen because it allows for all
+       /// lower 15 bits of char to be used without restriction; the surrogate range 
+       /// [U+D800-U+DFFF] does not represent valid chars, and would require
+       /// complicated handling to avoid them and allow use of char's high bit.
+       /// 
+       /// Although unset bits are used as padding in the final char, the original
+       /// byte sequence could contain trailing bytes with no set bits (null bytes):
+       /// padding is indistinguishable from valid information.  To overcome this
+       /// problem, a char is appended, indicating the number of encoded bytes in the
+       /// final content char.
+       /// 
+       /// This class's operations are defined over CharBuffers and ByteBuffers, to
+       /// allow for wrapped arrays to be reused, reducing memory allocation costs for
+       /// repeated operations.  Note that this class calls array() and arrayOffset()
+       /// on the CharBuffers and ByteBuffers it uses, so only wrapped arrays may be
+       /// used.  This class interprets the arrayOffset() and limit() values returned by
+       /// its input buffers as beginning and end+1 positions on the wrapped array,
+       /// respectively; similarly, on the output buffer, arrayOffset() is the first
+       /// position written to, and limit() is set to one past the final output array
+       /// position.
+       /// </summary>
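+       /// <example>
+       /// A minimal round-trip sketch using this class's public API; the input
+       /// bytes are arbitrary sample values, not anything prescribed:
+       /// <code>
+       /// var bytes = new System.Collections.Generic.List&lt;byte&gt;(new byte[] { 0x01, 0x7F, 0xFF });
+       /// System.Collections.Generic.List&lt;char&gt; chars = IndexableBinaryStringTools.Encode(bytes);
+       /// System.Collections.Generic.List&lt;byte&gt; round = IndexableBinaryStringTools.Decode(chars);
+       /// // round now holds the same byte values as bytes, and the encoded chars
+       /// // sort in the same order as the original byte sequences.
+       /// </code>
+       /// </example>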
+       public class IndexableBinaryStringTools
+       {
+               
+               private static readonly CodingCase[] CODING_CASES = new CodingCase[]{new CodingCase(7, 1), new CodingCase(14, 6, 2), new CodingCase(13, 5, 3), new CodingCase(12, 4, 4), new CodingCase(11, 3, 5), new CodingCase(10, 2, 6), new CodingCase(9, 1, 7), new CodingCase(8, 0)};
+               
+               // Export only static methods
+               private IndexableBinaryStringTools()
+               {
+               }
+               
+               /// <summary> Returns the number of chars required to encode the given byte sequence.
+               /// 
+               /// </summary>
+               /// <param name="original">The byte sequence to be encoded.  Must be backed by an array.
+               /// </param>
+               /// <returns> The number of chars required to encode the given byte sequence
+               /// </returns>
+               /// <throws>  IllegalArgumentException If the given ByteBuffer is not backed by an array </throws>
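+               /// <example>
+               /// For instance (following the arithmetic in this method): 3 input bytes
+               /// carry 24 bits, which need ceil(24/15) = 2 content chars plus 1 trailing
+               /// count char, so GetEncodedLength returns 3.
+               /// </example>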
+               public static int GetEncodedLength(System.Collections.Generic.List<byte> original)
+               {
+            return (original.Count == 0) ? 0 : ((original.Count * 8 + 14) / 15) + 1;
+               }
+               
+               /// <summary> Returns the number of bytes required to decode the given char sequence.
+               /// 
+               /// </summary>
+               /// <param name="encoded">The char sequence to be decoded.  Must be backed by an array.
+               /// </param>
+               /// <returns> The number of bytes required to decode the given char sequence
+               /// </returns>
+               /// <throws>  IllegalArgumentException If the given CharBuffer is not backed by an array </throws>
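+               /// <example>
+               /// For instance (following the arithmetic in this method): for 3 encoded
+               /// chars whose trailing count char is 1, there is 1 content char besides
+               /// the final one, giving (1 * 15 + 7) / 8 + 1 = 2 + 1 = 3 decoded bytes.
+               /// </example>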
+        public static int GetDecodedLength(System.Collections.Generic.List<char> encoded)
+               {
+            int numChars = encoded.Count - 1;
+            if (numChars <= 0)
+            {
+                return 0;
+            }
+            else
+            {
+                int numFullBytesInFinalChar = encoded[encoded.Count - 1];
+                int numEncodedChars = numChars - 1;
+                return ((numEncodedChars * 15 + 7) / 8 + numFullBytesInFinalChar);
+            }
+               }
+               
+               /// <summary> Encodes the input byte sequence into the output char sequence.  Before
+               /// calling this method, ensure that the output CharBuffer has sufficient
+               /// capacity by calling {@link #GetEncodedLength(java.nio.ByteBuffer)}.
+               /// 
+               /// </summary>
+               /// <param name="input">The byte sequence to encode
+               /// </param>
+               /// <param name="output">Where the char sequence encoding result will go.  The limit
+               /// is set to one past the position of the final char.
+               /// </param>
+               /// <throws>  IllegalArgumentException If either the input or the output buffer
+               /// is not backed by an array
+               /// </throws>
+               public static void  Encode(System.Collections.Generic.List<byte> input, System.Collections.Generic.List<char> output)
+               {
+            int outputLength = GetEncodedLength(input);
+            // only adjust capacity if needed
+            if (output.Capacity < outputLength)
+            {
+                output.Capacity = outputLength;
+            }
+
+            // ensure the buffer we are writing into is occupied with nulls
+            if (output.Count < outputLength)
+            {
+                for (int i = output.Count; i < outputLength; i++)
+                {
+                    output.Add(Char.MinValue);
+                }
+            }
+
+            if (input.Count > 0)
+            {
+                int inputByteNum = 0;
+                int caseNum = 0;
+                int outputCharNum = 0;
+                CodingCase codingCase;
+                for (; inputByteNum + CODING_CASES[caseNum].numBytes <= input.Count; ++outputCharNum)
+                {
+                    codingCase = CODING_CASES[caseNum];
+                    if (2 == codingCase.numBytes)
+                    {
+                        output[outputCharNum] = (char)(((input[inputByteNum] & 0xFF) << codingCase.initialShift) + ((SupportClass.Number.URShift((input[inputByteNum + 1] & 0xFF), codingCase.finalShift)) & codingCase.finalMask) & (short)0x7FFF);
+                    }
+                    else
+                    {
+                        // numBytes is 3
+                        output[outputCharNum] = (char)(((input[inputByteNum] & 0xFF) << codingCase.initialShift) + ((input[inputByteNum + 1] & 0xFF) << codingCase.middleShift) + ((SupportClass.Number.URShift((input[inputByteNum + 2] & 0xFF), codingCase.finalShift)) & codingCase.finalMask) & (short)0x7FFF);
+                    }
+                    inputByteNum += codingCase.advanceBytes;
+                    if (++caseNum == CODING_CASES.Length)
+                    {
+                        caseNum = 0;
+                    }
+                }
+                // Produce final char (if any) and trailing count chars.
+                codingCase = CODING_CASES[caseNum];
+                
+                if (inputByteNum + 1 < input.Count)
+                {
+                    // codingCase.numBytes must be 3
+                    output[outputCharNum++] = (char) ((((input[inputByteNum] & 0xFF) << codingCase.initialShift) + ((input[inputByteNum + 1] & 0xFF) << codingCase.middleShift)) & (short) 0x7FFF);
+                    // Add trailing char containing the number of full bytes in final char
+                    output[outputCharNum++] = (char) 1;
+                }
+                else if (inputByteNum < input.Count)
+                {
+                    output[outputCharNum++] = (char) (((input[inputByteNum] & 0xFF) << codingCase.initialShift) & (short) 0x7FFF);
+                    // Add trailing char containing the number of full bytes in final char
+                    output[outputCharNum++] = caseNum == 0?(char) 1:(char) 0;
+                }
+                else
+                {
+                    // No left over bits - last char is completely filled.
+                    // Add trailing char containing the number of full bytes in final char
+                    output[outputCharNum++] = (char) 1;
+                }
+            }
+               }
+               
+               /// <summary> Decodes the input char sequence into the output byte sequence.  Before
+               /// calling this method, ensure that the output ByteBuffer has sufficient
+               /// capacity by calling {@link #GetDecodedLength(java.nio.CharBuffer)}.
+               /// 
+               /// </summary>
+               /// <param name="input">The char sequence to decode
+               /// </param>
+               /// <param name="output">Where the byte sequence decoding result will go.  The limit
+               /// is set to one past the position of the final decoded byte.
+               /// </param>
+               /// <throws>  IllegalArgumentException If either the input or the output buffer
+               /// is not backed by an array
+               /// </throws>
+               public static void Decode(System.Collections.Generic.List<char> input, System.Collections.Generic.List<byte> output)
+               {
+            int numOutputBytes = GetDecodedLength(input);
+            if (output.Capacity < numOutputBytes)
+            {
+                output.Capacity = numOutputBytes;
+            }
+
+            // ensure the buffer we are writing into is occupied with nulls
+            if (output.Count < numOutputBytes)
+            {
+                for (int i = output.Count; i < numOutputBytes; i++)
+                {
+                    output.Add(Byte.MinValue);
+                }
+            }
+
+            if (input.Count > 0)
+            {
+                int caseNum = 0;
+                int outputByteNum = 0;
+                int inputCharNum = 0;
+                short inputChar;
+                CodingCase codingCase;
+                for (; inputCharNum < input.Count - 2; ++inputCharNum)
+                {
+                    codingCase = CODING_CASES[caseNum];
+                    inputChar = (short) input[inputCharNum];
+                    if (2 == codingCase.numBytes)
+                    {
+                        if (0 == caseNum)
+                        {
+                            output[outputByteNum] = (byte) (SupportClass.Number.URShift(inputChar, codingCase.initialShift));
+                        }
+                        else
+                        {
+                            output[outputByteNum] = (byte) (output[outputByteNum] + (byte) (SupportClass.Number.URShift(inputChar, codingCase.initialShift)));
+                        }
+                        output[outputByteNum + 1] = (byte) ((inputChar & codingCase.finalMask) << codingCase.finalShift);
+                    }
+                    else
+                    {
+                        // numBytes is 3
+                        output[outputByteNum] = (byte) (output[outputByteNum] + (byte) (SupportClass.Number.URShift(inputChar, codingCase.initialShift)));
+                        output[outputByteNum + 1] = (byte) (SupportClass.Number.URShift((inputChar & codingCase.middleMask), codingCase.middleShift));
+                        output[outputByteNum + 2] = (byte) ((inputChar & codingCase.finalMask) << codingCase.finalShift);
+                    }
+                    outputByteNum += codingCase.advanceBytes;
+                    if (++caseNum == CODING_CASES.Length)
+                    {
+                        caseNum = 0;
+                    }
+                }
+                // Handle final char
+                inputChar = (short) input[inputCharNum];
+                codingCase = CODING_CASES[caseNum];
+                if (0 == caseNum)
+                {
+                    output[outputByteNum] = 0;
+                }
+                output[outputByteNum] = (byte) (output[outputByteNum] + (byte) (SupportClass.Number.URShift(inputChar, codingCase.initialShift)));
+                long bytesLeft = numOutputBytes - outputByteNum;
+                if (bytesLeft > 1)
+                {
+                    if (2 == codingCase.numBytes)
+                    {
+                        output[outputByteNum + 1] = (byte) (SupportClass.Number.URShift((inputChar & codingCase.finalMask), codingCase.finalShift));
+                    }
+                    else
+                    {
+                        // numBytes is 3
+                        output[outputByteNum + 1] = (byte) (SupportClass.Number.URShift((inputChar & codingCase.middleMask), codingCase.middleShift));
+                        if (bytesLeft > 2)
+                        {
+                            output[outputByteNum + 2] = (byte) ((inputChar & codingCase.finalMask) << codingCase.finalShift);
+                        }
+                    }
+                }
+            }
+               }
+               
+               /// <summary> Decodes the given char sequence, which must have been encoded by
+               /// {@link #Encode(java.nio.ByteBuffer)} or 
+               /// {@link #Encode(java.nio.ByteBuffer, java.nio.CharBuffer)}.
+               /// 
+               /// </summary>
+               /// <param name="input">The char sequence to decode
+               /// </param>
+               /// <returns> A byte sequence containing the decoding result.  The limit
+               /// is set to one past the position of the final decoded byte.
+               /// </returns>
+               /// <throws>  IllegalArgumentException If the input buffer is not backed by an
+               /// array
+               /// </throws>
+        public static System.Collections.Generic.List<byte> Decode(System.Collections.Generic.List<char> input)
+               {
+            System.Collections.Generic.List<byte> output = 
+                new System.Collections.Generic.List<byte>(new byte[GetDecodedLength(input)]);
+                       Decode(input, output);
+                       return output;
+               }
+               
+               /// <summary> Encodes the input byte sequence.
+               /// 
+               /// </summary>
+               /// <param name="input">The byte sequence to encode
+               /// </param>
+               /// <returns> A char sequence containing the encoding result.  The limit is set
+               /// to one past the position of the final char.
+               /// </returns>
+               /// <throws>  IllegalArgumentException If the input buffer is not backed by an
+               /// array
+               /// </throws>
+               public static System.Collections.Generic.List<char> Encode(System.Collections.Generic.List<byte> input)
+               {
+            System.Collections.Generic.List<char> output = 
+                new System.Collections.Generic.List<char>(new char[GetEncodedLength(input)]);
+                       Encode(input, output);
+                       return output;
+               }
+               
+               internal class CodingCase
+               {
+                       internal int numBytes, initialShift, middleShift, finalShift, advanceBytes = 2;
+                       internal short middleMask, finalMask;
+                       
+                       internal CodingCase(int initialShift, int middleShift, int finalShift)
+                       {
+                               this.numBytes = 3;
+                               this.initialShift = initialShift;
+                               this.middleShift = middleShift;
+                               this.finalShift = finalShift;
+                               this.finalMask = (short) (SupportClass.Number.URShift((short) 0xFF, finalShift));
+                               this.middleMask = (short) ((short) 0xFF << middleShift);
+                       }
+                       
+                       internal CodingCase(int initialShift, int finalShift)
+                       {
+                               this.numBytes = 2;
+                               this.initialShift = initialShift;
+                               this.finalShift = finalShift;
+                               this.finalMask = (short) (SupportClass.Number.URShift((short) 0xFF, finalShift));
+                               if (finalShift != 0)
+                               {
+                                       advanceBytes = 1;
+                               }
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/MapOfSets.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/MapOfSets.cs
new file mode 100644 (file)
index 0000000..f883322
--- /dev/null
@@ -0,0 +1,89 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> Helper class for keeping Sets of Objects associated with keys. <b>WARNING: THIS CLASS IS NOT THREAD SAFE</b></summary>
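+       /// <example>
+       /// A minimal usage sketch; the caller supplies the backing dictionary, and
+       /// the key/value types here are arbitrary illustrations:
+       /// <code>
+       /// var mos = new MapOfSets&lt;string, int&gt;(
+       ///     new System.Collections.Generic.Dictionary&lt;string, System.Collections.Generic.Dictionary&lt;int, int&gt;&gt;());
+       /// mos.Put("field", 1);   // set size is now 1
+       /// mos.Put("field", 1);   // still 1 -- set semantics deduplicate
+       /// mos.Put("field", 2);   // set size is now 2
+       /// </code>
+       /// </example>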
+    public class MapOfSets<T, V>
+    {
+               
+               // TODO: This will be a HashSet<T> when we start using .NET Framework 3.5
+               private System.Collections.Generic.IDictionary<T, System.Collections.Generic.Dictionary<V, V>> theMap;
+               
+               /// <param name="m">the backing store for this object
+               /// </param>
+               public MapOfSets(System.Collections.Generic.IDictionary<T, System.Collections.Generic.Dictionary<V, V>> m)
+               {
+                       theMap = m;
+               }
+               
+               /// <returns> direct access to the map backing this object.
+               /// </returns>
+               public virtual System.Collections.Generic.IDictionary<T, System.Collections.Generic.Dictionary<V, V>> GetMap()
+               {
+                       return theMap;
+               }
+               
+               /// <summary> Adds val to the Set associated with key in the Map.  If key is not 
+               /// already in the map, a new Set will first be created.
+               /// </summary>
+               /// <returns> the size of the Set associated with key once val is added to it.
+               /// </returns>
+               public virtual int Put(T key, V val)
+               {
+            // TODO: This will be a HashSet<T> when we start using .NET Framework 3.5
+            System.Collections.Generic.Dictionary<V, V> theSet;
+            if (!theMap.TryGetValue(key, out theSet))
+            {
+                theSet = new System.Collections.Generic.Dictionary<V, V>(23);
+                theMap[key] = theSet;
+            }
+            if (!theSet.ContainsKey(val))
+            {
+                theSet.Add(val, val);
+            }
+                       return theSet.Count;
+               }
+               /// <summary> Adds multiple vals to the Set associated with key in the Map.  
+               /// If key is not 
+               /// already in the map, a new Set will first be created.
+               /// </summary>
+               /// <returns> the size of the Set associated with key once vals are added to it.
+               /// </returns>
+               public virtual int PutAll(T key, System.Collections.Generic.Dictionary<V, V> vals)
+               {
+            // TODO: This will be a HashSet<T> when we start using .NET Framework 3.5
+            System.Collections.Generic.Dictionary<V, V> theSet;
+            if (!theMap.TryGetValue(key, out theSet))
+            {
+                theSet = new System.Collections.Generic.Dictionary<V, V>(23);
+                theMap[key] = theSet;
+            }
+            foreach(V item in vals.Keys)
+            {
+                if (!theSet.ContainsKey(item))
+                {
+                    theSet.Add(item, item);
+                }
+            }
+                       return theSet.Count;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/MemoryModel.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/MemoryModel.cs
new file mode 100644 (file)
index 0000000..ba207c9
--- /dev/null
@@ -0,0 +1,48 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> Returns primitive memory sizes for estimating RAM usage.
+       /// 
+       /// </summary>
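+       /// <example>
+       /// A hedged sketch of a concrete model; the sizes below are illustrative
+       /// assumptions for a 64-bit runtime, not measured values:
+       /// <code>
+       /// class Sample64BitMemoryModel : MemoryModel
+       /// {
+       ///     public override int GetArraySize() { return 24; }      // assumed array header size
+       ///     public override int GetClassSize() { return 16; }      // assumed object header size
+       ///     public override int GetPrimitiveSize(System.Type clazz)
+       ///     {
+       ///         return (clazz == typeof(long) || clazz == typeof(double)) ? 8 : 4;
+       ///     }
+       ///     public override int GetReferenceSize() { return 8; }   // assumed pointer size
+       /// }
+       /// </code>
+       /// </example>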
+       public abstract class MemoryModel
+       {
+               
+               /// <returns> size of array beyond contents
+               /// </returns>
+               public abstract int GetArraySize();
+               
+               /// <returns> Class size overhead
+               /// </returns>
+               public abstract int GetClassSize();
+               
+               /// <param name="clazz">a primitive Class - bool, byte, char, short, int,
+               /// long, float, double
+               /// </param>
+               /// <returns> the size in bytes of given primitive Class
+               /// </returns>
+               public abstract int GetPrimitiveSize(System.Type clazz);
+               
+               /// <returns> size of reference
+               /// </returns>
+               public abstract int GetReferenceSize();
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/NumericUtils.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/NumericUtils.cs
new file mode 100644 (file)
index 0000000..8e94022
--- /dev/null
@@ -0,0 +1,489 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using NumericTokenStream = Mono.Lucene.Net.Analysis.NumericTokenStream;
+using NumericField = Mono.Lucene.Net.Documents.NumericField;
+using NumericRangeFilter = Mono.Lucene.Net.Search.NumericRangeFilter;
+using NumericRangeQuery = Mono.Lucene.Net.Search.NumericRangeQuery;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> This is a helper class to generate prefix-encoded representations for numerical values
+       /// and supplies converters to represent float/double values as sortable integers/longs.
+       /// 
+       /// <p/>To quickly execute range queries in Apache Lucene, a range is divided recursively
+       /// into multiple intervals for searching: The center of the range is searched only with
+       /// the lowest possible precision in the trie, while the boundaries are matched
+       /// more exactly. This reduces the number of terms dramatically.
+       /// 
+       /// <p/>This class generates terms to achieve this: First the numerical integer values need to
+       /// be converted to strings. For that, integer values (32 bit or 64 bit) are made unsigned
+       /// and the bits are converted to ASCII chars, 7 bits per char. The resulting string is
+       /// sortable like the original integer value. Each value is also prefixed
+       /// (in the first char) by the <code>shift</code> value (number of bits removed) used
+       /// during encoding.
+       /// 
+       /// <p/>To also index floating point numbers, this class supplies two methods to convert them
+       /// to integer values by changing their bit layout: {@link #doubleToSortableLong},
+       /// {@link #floatToSortableInt}. You will have no precision loss by
+       /// converting floating point numbers to integers and back (only that the integer form
+       /// is not usable). Other data types like dates can easily be converted to longs or ints (e.g.
+       /// date to long: {@link java.util.Date#getTime}).
+       /// 
+       /// <p/>For easy usage, the trie algorithm is implemented for indexing inside
+       /// {@link NumericTokenStream} that can index <code>int</code>, <code>long</code>,
+       /// <code>float</code>, and <code>double</code>. For querying,
+       /// {@link NumericRangeQuery} and {@link NumericRangeFilter} implement the query part
+       /// for the same data types.
+       /// 
+       /// <p/>This class can also be used to generate lexicographically sortable (according
+       /// to {@link String#compareTo(String)}) representations of numeric data types for other
+       /// usages (e.g. sorting).
+       /// 
+       /// <p/><font color="red"><b>NOTE:</b> This API is experimental and
+       /// might change in incompatible ways in the next release.</font>
+       /// 
+       /// </summary>
+       /// <since> 2.9
+       /// </since>
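+       /// <example>
+       /// A minimal sketch of full-precision round-tripping with this class's own
+       /// API (shift 0, so no bits are stripped):
+       /// <code>
+       /// string coded = NumericUtils.LongToPrefixCoded(42L);
+       /// long decoded = NumericUtils.PrefixCodedToLong(coded);   // decoded == 42
+       /// // Encoded forms compare ordinally like the original values, e.g.
+       /// // string.CompareOrdinal(NumericUtils.LongToPrefixCoded(-1L),
+       /// //                       NumericUtils.LongToPrefixCoded(1L)) &lt; 0
+       /// </code>
+       /// </example>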
+       public sealed class NumericUtils
+       {
+               
+               private NumericUtils()
+               {
+               } // no instance!
+               
+               /// <summary> The default precision step used by {@link NumericField}, {@link NumericTokenStream},
+               /// {@link NumericRangeQuery}, and {@link NumericRangeFilter} as default
+               /// </summary>
+               public const int PRECISION_STEP_DEFAULT = 4;
+               
+               /// <summary> Expert: Longs are stored at lower precision by shifting off lower bits. The shift count is
+               /// stored as <code>SHIFT_START_LONG+shift</code> in the first character
+               /// </summary>
+               public static char SHIFT_START_LONG = (char) 0x20;
+               
+               /// <summary> Expert: The maximum term length (used for <code>char[]</code> buffer size)
+               /// for encoding <code>long</code> values.
+               /// </summary>
+               /// <seealso cref="LongToPrefixCoded(long,int,char[])">
+               /// </seealso>
+               public const int BUF_SIZE_LONG = 63 / 7 + 2;
+               
+               /// <summary> Expert: Integers are stored at lower precision by shifting off lower bits. The shift count is
+               /// stored as <code>SHIFT_START_INT+shift</code> in the first character
+               /// </summary>
+               public static char SHIFT_START_INT = (char) 0x60;
+               
+               /// <summary> Expert: The maximum term length (used for <code>char[]</code> buffer size)
+               /// for encoding <code>int</code> values.
+               /// </summary>
+               /// <seealso cref="IntToPrefixCoded(int,int,char[])">
+               /// </seealso>
+               public const int BUF_SIZE_INT = 31 / 7 + 2;
+               
+               /// <summary> Expert: Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
+               /// This method is used by {@link NumericTokenStream}.
+               /// </summary>
+               /// <param name="val">the numeric value
+               /// </param>
+               /// <param name="shift">how many bits to strip from the right
+               /// </param>
+               /// <param name="buffer">that will contain the encoded chars, must be at least of {@link #BUF_SIZE_LONG}
+               /// length
+               /// </param>
+               /// <returns> number of chars written to buffer
+               /// </returns>
+               public static int LongToPrefixCoded(long val, int shift, char[] buffer)
+               {
+                       if (shift > 63 || shift < 0)
+                               throw new System.ArgumentException("Illegal shift value, must be 0..63");
+                       int nChars = (63 - shift) / 7 + 1, len = nChars + 1;
+                       buffer[0] = (char) (SHIFT_START_LONG + shift);
+            ulong sortableBits = BitConverter.ToUInt64(BitConverter.GetBytes(val), 0) ^ 0x8000000000000000L;
+                       sortableBits = sortableBits >> shift;
+                       while (nChars >= 1)
+                       {
+                               // Store 7 bits per character for good efficiency when UTF-8 encoding.
+                               // The whole number is right-justified so that lucene can prefix-encode
+                               // the terms more efficiently.
+                               buffer[nChars--] = (char) (sortableBits & 0x7f);
+                               sortableBits = sortableBits >> 7;
+                       }
+                       return len;
+               }
+               
+               /// <summary> Expert: Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
+               /// This method is used by {@link LongRangeBuilder}.
+               /// </summary>
+               /// <param name="val">the numeric value
+               /// </param>
+               /// <param name="shift">how many bits to strip from the right
+               /// </param>
+               public static System.String LongToPrefixCoded(long val, int shift)
+               {
+                       char[] buffer = new char[BUF_SIZE_LONG];
+                       int len = LongToPrefixCoded(val, shift, buffer);
+                       return new System.String(buffer, 0, len);
+               }
+               
+               /// <summary> This is a convenience method, that returns prefix coded bits of a long without
+               /// reducing the precision. It can be used to store the full precision value as a
+               /// stored field in index.
+               /// <p/>To decode, use {@link #prefixCodedToLong}.
+               /// </summary>
+               public static System.String LongToPrefixCoded(long val)
+               {
+                       return LongToPrefixCoded(val, 0);
+               }
+               
+               /// <summary> Expert: Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
+               /// This method is used by {@link NumericTokenStream}.
+               /// </summary>
+               /// <param name="val">the numeric value
+               /// </param>
+               /// <param name="shift">how many bits to strip from the right
+               /// </param>
+               /// <param name="buffer">that will contain the encoded chars, must be at least of {@link #BUF_SIZE_INT}
+               /// length
+               /// </param>
+               /// <returns> number of chars written to buffer
+               /// </returns>
+               public static int IntToPrefixCoded(int val, int shift, char[] buffer)
+               {
+                       if (shift > 31 || shift < 0)
+                               throw new System.ArgumentException("Illegal shift value, must be 0..31");
+                       int nChars = (31 - shift) / 7 + 1, len = nChars + 1;
+                       buffer[0] = (char) (SHIFT_START_INT + shift);
+                       int sortableBits = val ^ unchecked((int) 0x80000000);
+                       sortableBits = SupportClass.Number.URShift(sortableBits, shift);
+                       while (nChars >= 1)
+                       {
+                               // Store 7 bits per character for good efficiency when UTF-8 encoding.
+                               // The whole number is right-justified so that lucene can prefix-encode
+                               // the terms more efficiently.
+                               buffer[nChars--] = (char) (sortableBits & 0x7f);
+                               sortableBits = SupportClass.Number.URShift(sortableBits, 7);
+                       }
+                       return len;
+               }
+               
+               /// <summary> Expert: Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
+               /// This method is used by {@link IntRangeBuilder}.
+               /// </summary>
+               /// <param name="val">the numeric value
+               /// </param>
+               /// <param name="shift">how many bits to strip from the right
+               /// </param>
+               public static System.String IntToPrefixCoded(int val, int shift)
+               {
+                       char[] buffer = new char[BUF_SIZE_INT];
+                       int len = IntToPrefixCoded(val, shift, buffer);
+                       return new System.String(buffer, 0, len);
+               }
+               
+               /// <summary> This is a convenience method, that returns prefix coded bits of an int without
+               /// reducing the precision. It can be used to store the full precision value as a
+               /// stored field in index.
+               /// <p/>To decode, use {@link #prefixCodedToInt}.
+               /// </summary>
+               public static System.String IntToPrefixCoded(int val)
+               {
+                       return IntToPrefixCoded(val, 0);
+               }
+               
+               /// <summary> Returns a long from prefixCoded characters.
+               /// Rightmost bits will be zero for lower precision codes.
+               /// This method can be used to decode e.g. a stored field.
+               /// </summary>
+               /// <throws>  NumberFormatException if the supplied string is
+               /// not correctly prefix encoded.
+               /// </throws>
+               /// <seealso cref="LongToPrefixCoded(long)">
+               /// </seealso>
+               public static long PrefixCodedToLong(System.String prefixCoded)
+               {
+                       int shift = prefixCoded[0] - SHIFT_START_LONG;
+                       if (shift > 63 || shift < 0)
+                               throw new System.FormatException("Invalid shift value in prefixCoded string (is encoded value really a LONG?)");
+                       ulong sortableBits = 0UL;
+                       for (int i = 1, len = prefixCoded.Length; i < len; i++)
+                       {
+                               sortableBits <<= 7;
+                               char ch = prefixCoded[i];
+                               if (ch > 0x7f)
+                               {
+                                       throw new System.FormatException("Invalid prefixCoded numerical value representation (char " + System.Convert.ToString((int) ch, 16) + " at position " + i + " is invalid)");
+                               }
+                               sortableBits |= (ulong) ch;
+                       }
+                       return BitConverter.ToInt64(BitConverter.GetBytes((sortableBits << shift) ^ 0x8000000000000000L), 0);
+               }
+               
+               /// <summary> Returns an int from prefixCoded characters.
+               /// Rightmost bits will be zero for lower precision codes.
+               /// This method can be used to decode e.g. a stored field.
+               /// </summary>
+               /// <throws>  NumberFormatException if the supplied string is
+               /// not correctly prefix encoded.
+               /// </throws>
+               /// <seealso cref="IntToPrefixCoded(int)">
+               /// </seealso>
+               public static int PrefixCodedToInt(System.String prefixCoded)
+               {
+                       int shift = prefixCoded[0] - SHIFT_START_INT;
+                       if (shift > 31 || shift < 0)
+                               throw new System.FormatException("Invalid shift value in prefixCoded string (is encoded value really an INT?)");
+                       int sortableBits = 0;
+                       for (int i = 1, len = prefixCoded.Length; i < len; i++)
+                       {
+                               sortableBits <<= 7;
+                               char ch = prefixCoded[i];
+                               if (ch > 0x7f)
+                               {
+                                       throw new System.FormatException("Invalid prefixCoded numerical value representation (char " + System.Convert.ToString((int) ch, 16) + " at position " + i + " is invalid)");
+                               }
+                               sortableBits |= (int) ch;
+                       }
+                       return (sortableBits << shift) ^ unchecked((int) 0x80000000);
+               }
+               
+               /// <summary> Converts a <code>double</code> value to a sortable signed <code>long</code>.
+               /// The value is converted by getting its IEEE 754 floating-point &quot;double format&quot;
+               /// bit layout and then some bits are swapped, to be able to compare the result as a long.
+               /// By this the precision is not reduced, but the value can easily be used as a long.
+               /// </summary>
+               /// <seealso cref="sortableLongToDouble">
+               /// </seealso>
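+               /// <example>
+               /// A small worked sketch: DoubleToSortableLong and SortableLongToDouble
+               /// invert each other, and the sortable forms order like the doubles:
+               /// <code>
+               /// long a = NumericUtils.DoubleToSortableLong(-2.5);   // negative double, so a &lt; 0
+               /// long b = NumericUtils.DoubleToSortableLong(3.5);    // positive double, bits unchanged
+               /// // a &lt; b holds just like -2.5 &lt; 3.5, and
+               /// // NumericUtils.SortableLongToDouble(a) == -2.5
+               /// </code>
+               /// </example>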
+               public static long DoubleToSortableLong(double val)
+               {
+            long f = BitConverter.DoubleToInt64Bits(val);   // {{Aroush-2.9}} will this work the same as 'java.lang.Double.doubleToRawLongBits()'?
+                       if (f < 0)
+                               f ^= 0x7fffffffffffffffL;
+                       return f;
+               }
+               
+               /// <summary> Convenience method: this just returns:
+               /// longToPrefixCoded(doubleToSortableLong(val))
+               /// </summary>
+               public static System.String DoubleToPrefixCoded(double val)
+               {
+                       return LongToPrefixCoded(DoubleToSortableLong(val));
+               }
+               
+               /// <summary> Converts a sortable <code>long</code> back to a <code>double</code>.</summary>
+               /// <seealso cref="doubleToSortableLong">
+               /// </seealso>
+               public static double SortableLongToDouble(long val)
+               {
+                       if (val < 0)
+                               val ^= 0x7fffffffffffffffL;
+                       return BitConverter.Int64BitsToDouble(val);
+               }
+               
+               /// <summary> Convenience method: this just returns:
+               /// sortableLongToDouble(prefixCodedToLong(val))
+               /// </summary>
+               public static double PrefixCodedToDouble(System.String val)
+               {
+                       return SortableLongToDouble(PrefixCodedToLong(val));
+               }
+               
+               /// <summary> Converts a <code>float</code> value to a sortable signed <code>int</code>.
+               /// The value is converted by getting its IEEE 754 floating-point &quot;float format&quot;
+               /// bit layout and then some bits are swapped, to be able to compare the result as an int.
+               /// By this the precision is not reduced, but the value can easily be used as an int.
+               /// </summary>
+               /// <seealso cref="sortableIntToFloat">
+               /// </seealso>
+               public static int FloatToSortableInt(float val)
+               {
+                       int f = BitConverter.ToInt32(BitConverter.GetBytes(val), 0);
+                       if (f < 0)
+                               f ^= 0x7fffffff;
+                       return f;
+               }
+               
+               /// <summary> Convenience method: this just returns:
+               /// intToPrefixCoded(floatToSortableInt(val))
+               /// </summary>
+               public static System.String FloatToPrefixCoded(float val)
+               {
+                       return IntToPrefixCoded(FloatToSortableInt(val));
+               }
+               
+               /// <summary> Converts a sortable <code>int</code> back to a <code>float</code>.</summary>
+               /// <seealso cref="floatToSortableInt">
+               /// </seealso>
+               public static float SortableIntToFloat(int val)
+               {
+                       if (val < 0)
+                               val ^= 0x7fffffff;
+            return BitConverter.ToSingle(BitConverter.GetBytes(val), 0);
+               }
+               
+               /// <summary> Convenience method: this just returns:
+               /// sortableIntToFloat(prefixCodedToInt(val))
+               /// </summary>
+               public static float PrefixCodedToFloat(System.String val)
+               {
+                       return SortableIntToFloat(PrefixCodedToInt(val));
+               }
+               
+               /// <summary> Expert: Splits a long range recursively.
+               /// You may implement a builder that adds clauses to a
+               /// {@link Mono.Lucene.Net.Search.BooleanQuery} for each call to its
+               /// {@link LongRangeBuilder#AddRange(String,String)}
+               /// method.
+               /// <p/>This method is used by {@link NumericRangeQuery}.
+               /// </summary>
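+               /// <example>
+               /// A hedged sketch of a builder that simply collects the encoded bounds;
+               /// the subclass name is illustrative, not part of this API:
+               /// <code>
+               /// class CollectingBuilder : NumericUtils.LongRangeBuilder
+               /// {
+               ///     public readonly System.Collections.Generic.List&lt;string&gt; Bounds =
+               ///         new System.Collections.Generic.List&lt;string&gt;();
+               ///     public override void AddRange(string minPrefixCoded, string maxPrefixCoded)
+               ///     {
+               ///         Bounds.Add(minPrefixCoded + ".." + maxPrefixCoded);
+               ///     }
+               /// }
+               /// // NumericUtils.SplitLongRange(new CollectingBuilder(), 4, 100L, 200L);
+               /// </code>
+               /// </example>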
+               public static void  SplitLongRange(LongRangeBuilder builder, int precisionStep, long minBound, long maxBound)
+               {
+                       SplitRange(builder, 64, precisionStep, minBound, maxBound);
+               }
+               
+               /// <summary> Expert: Splits an int range recursively.
+               /// You may implement a builder that adds clauses to a
+               /// {@link Mono.Lucene.Net.Search.BooleanQuery} for each call to its
+               /// {@link IntRangeBuilder#AddRange(String,String)}
+               /// method.
+               /// <p/>This method is used by {@link NumericRangeQuery}.
+               /// </summary>
+               public static void  SplitIntRange(IntRangeBuilder builder, int precisionStep, int minBound, int maxBound)
+               {
+                       SplitRange(builder, 32, precisionStep, (long) minBound, (long) maxBound);
+               }
+               
+               /// <summary>This helper does the splitting for both 32 and 64 bit. </summary>
+               private static void  SplitRange(System.Object builder, int valSize, int precisionStep, long minBound, long maxBound)
+               {
+                       if (precisionStep < 1)
+                               throw new System.ArgumentException("precisionStep must be >=1");
+                       if (minBound > maxBound)
+                               return ;
+                       for (int shift = 0; ; shift += precisionStep)
+                       {
+                               // calculate new bounds for inner precision
+                               long diff = 1L << (shift + precisionStep);
+                               long mask = ((1L << precisionStep) - 1L) << shift;
+                               bool hasLower = (minBound & mask) != 0L;
+                               bool hasUpper = (maxBound & mask) != mask;
+                               long nextMinBound = (hasLower?(minBound + diff):minBound) & ~ mask;
+                               long nextMaxBound = (hasUpper?(maxBound - diff):maxBound) & ~ mask;
+                               bool lowerWrapped = nextMinBound < minBound,
+                     upperWrapped = nextMaxBound > maxBound;
+      
+                if (shift+precisionStep>=valSize || nextMinBound>nextMaxBound || lowerWrapped || upperWrapped) 
+                               {
+                                       // We are in the lowest precision or the next precision is not available.
+                                       AddRange(builder, valSize, minBound, maxBound, shift);
+                                       // exit the split recursion loop
+                                       break;
+                               }
+                               
+                               if (hasLower)
+                                       AddRange(builder, valSize, minBound, minBound | mask, shift);
+                               if (hasUpper)
+                                       AddRange(builder, valSize, maxBound & ~ mask, maxBound, shift);
+                               
+                               // recurse to next precision
+                               minBound = nextMinBound;
+                               maxBound = nextMaxBound;
+                       }
+               }
+               
+               /// <summary>Helper that delegates to correct range builder </summary>
+               private static void  AddRange(System.Object builder, int valSize, long minBound, long maxBound, int shift)
+               {
+                       // for the max bound set all lower bits (that were shifted away):
+                       // this is important for testing or other usages of the split range
+                       // (e.g. to reconstruct the full range). The prefixEncoding will remove
+                       // the bits anyway, so they do not hurt!
+                       maxBound |= (1L << shift) - 1L;
+                       // delegate to correct range builder
+                       switch (valSize)
+                       {
+                               
+                               case 64: 
+                                       ((LongRangeBuilder) builder).AddRange(minBound, maxBound, shift);
+                                       break;
+                               
+                               case 32: 
+                                       ((IntRangeBuilder) builder).AddRange((int) minBound, (int) maxBound, shift);
+                                       break;
+                               
+                               default: 
+                                       // Should not happen!
+                                       throw new System.ArgumentException("valSize must be 32 or 64.");
+                               
+                       }
+               }
+               
+               /// <summary> Expert: Callback for {@link #splitLongRange}.
+               /// You need to override only one of the methods.
+               /// <p/><font color="red"><b>NOTE:</b> This is a very low-level interface,
+               /// the method signatures may change in later versions.</font>
+               /// </summary>
+               public abstract class LongRangeBuilder
+               {
+                       
+                       /// <summary> Override this method if you would like to receive the already prefix-encoded range bounds.
+                       /// You can directly build classical (inclusive) range queries from them.
+                       /// </summary>
+                       public virtual void  AddRange(System.String minPrefixCoded, System.String maxPrefixCoded)
+                       {
+                               throw new System.NotSupportedException();
+                       }
+                       
+                       /// <summary> Override this method if you would like to receive the raw long range bounds.
+                       /// You can use this for e.g. debugging purposes (print out range bounds).
+                       /// </summary>
+                       public virtual void  AddRange(long min, long max, int shift)
+                       {
+                               AddRange(Mono.Lucene.Net.Util.NumericUtils.LongToPrefixCoded(min, shift), Mono.Lucene.Net.Util.NumericUtils.LongToPrefixCoded(max, shift));
+                       }
+               }
+               
+               /// <summary> Expert: Callback for {@link #splitIntRange}.
+               /// You need to override only one of the methods.
+               /// <p/><font color="red"><b>NOTE:</b> This is a very low-level interface,
+               /// the method signatures may change in later versions.</font>
+               /// </summary>
+               public abstract class IntRangeBuilder
+               {
+                       
+                       /// <summary> Override this method if you would like to receive the already prefix-encoded range bounds.
+                       /// You can directly build classical (inclusive) range queries from them.
+                       /// </summary>
+                       public virtual void  AddRange(System.String minPrefixCoded, System.String maxPrefixCoded)
+                       {
+                               throw new System.NotSupportedException();
+                       }
+                       
+                       /// <summary> Override this method if you would like to receive the raw int range bounds.
+                       /// You can use this e.g. for debugging purposes (printing out range bounds).
+                       /// </summary>
+                       public virtual void  AddRange(int min, int max, int shift)
+                       {
+                               AddRange(Mono.Lucene.Net.Util.NumericUtils.IntToPrefixCoded(min, shift), Mono.Lucene.Net.Util.NumericUtils.IntToPrefixCoded(max, shift));
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/OpenBitSet.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/OpenBitSet.cs
new file mode 100644 (file)
index 0000000..84f01e9
--- /dev/null
@@ -0,0 +1,954 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using DocIdSet = Mono.Lucene.Net.Search.DocIdSet;
+using DocIdSetIterator = Mono.Lucene.Net.Search.DocIdSetIterator;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary>An "open" BitSet implementation that allows direct access to the array of words
+       /// storing the bits.
+       /// <p/>
+       /// Unlike java.util.BitSet, the fact that bits are packed into an array of longs
+       /// is part of the interface.  This allows efficient implementation of other algorithms
+       /// by someone other than the author.  It also allows one to efficiently implement
+       /// alternate serialization or interchange formats.
+       /// <p/>
+       /// <code>OpenBitSet</code> is faster than <code>java.util.BitSet</code> in most operations
+       /// and *much* faster at calculating cardinality of sets and results of set operations.
+       /// It can also handle sets of larger cardinality (up to 64 * 2**32-1)
+       /// <p/>
+       /// The goals of <code>OpenBitSet</code> are the fastest implementation possible, and
+       /// maximum code reuse.  Extra safety and encapsulation
+       /// may always be built on top, but if that's built in, the cost can never be removed (and
+       /// hence people re-implement their own version in order to get better performance).
+       /// If you want a "safe", totally encapsulated (and slower and limited) BitSet
+       /// class, use <code>java.util.BitSet</code>.
+       /// <p/>
+       /// <h3>Performance Results</h3>
+       /// 
+       /// Test system: Pentium 4, Sun Java 1.5_06 -server -Xbatch -Xmx64M
+       /// <br/>BitSet size = 1,000,000
+       /// <br/>Results are java.util.BitSet time divided by OpenBitSet time.
+       /// <table border="1">
+       /// <tr>
+       /// <th></th> <th>cardinality</th> <th>intersect_count</th> <th>union</th> <th>nextSetBit</th> <th>get</th> <th>iterator</th>
+       /// </tr>
+       /// <tr>
+       /// <th>50% full</th> <td>3.36</td> <td>3.96</td> <td>1.44</td> <td>1.46</td> <td>1.99</td> <td>1.58</td>
+       /// </tr>
+       /// <tr>
+       /// <th>1% full</th> <td>3.31</td> <td>3.90</td> <td>&#160;</td> <td>1.04</td> <td>&#160;</td> <td>0.99</td>
+       /// </tr>
+       /// </table>
+       /// <br/>
+       /// Test system: AMD Opteron, 64 bit linux, Sun Java 1.5_06 -server -Xbatch -Xmx64M
+       /// <br/>BitSet size = 1,000,000
+       /// <br/>Results are java.util.BitSet time divided by OpenBitSet time.
+       /// <table border="1">
+       /// <tr>
+       /// <th></th> <th>cardinality</th> <th>intersect_count</th> <th>union</th> <th>nextSetBit</th> <th>get</th> <th>iterator</th>
+       /// </tr>
+       /// <tr>
+       /// <th>50% full</th> <td>2.50</td> <td>3.50</td> <td>1.00</td> <td>1.03</td> <td>1.12</td> <td>1.25</td>
+       /// </tr>
+       /// <tr>
+       /// <th>1% full</th> <td>2.51</td> <td>3.49</td> <td>&#160;</td> <td>1.00</td> <td>&#160;</td> <td>1.02</td>
+       /// </tr>
+       /// </table>
+       /// </summary>
+       /// <version>  $Id$
+       /// </version>
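+       /// <example>
+       /// A minimal usage sketch (values illustrative only):
+       /// <code>
+       /// OpenBitSet bitSet = new OpenBitSet(128);
+       /// bitSet.Set(3);
+       /// bitSet.Set(64);
+       /// long card = bitSet.Cardinality(); // 2
+       /// int first = bitSet.NextSetBit(0); // 3
+       /// </code>
+       /// </example>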
+       
+       [Serializable]
+       public class OpenBitSet:DocIdSet, System.ICloneable
+       {
+               protected internal long[] bits;
+               protected internal int wlen; // number of words (elements) used in the array
+               
+               /// <summary>Constructs an OpenBitSet large enough to hold numBits.
+               /// 
+               /// </summary>
+               /// <param name="numBits">
+               /// </param>
+               public OpenBitSet(long numBits)
+               {
+                       bits = new long[Bits2words(numBits)];
+                       wlen = bits.Length;
+               }
+               
+               public OpenBitSet():this(64)
+               {
+               }
+               
+               /// <summary>Constructs an OpenBitSet from an existing long[].
+               /// <br/>
+               /// The first 64 bits are in long[0],
+               /// with bit index 0 at the least significant bit, and bit index 63 at the most significant.
+               /// Given a bit index,
+               /// the word containing it is long[index/64], and it is at bit number index%64 within that word.
+               /// <p/>
+               /// numWords are the number of elements in the array that contain
+               /// set bits (non-zero longs).
+               /// numWords should be &lt;= bits.length, and
+               /// any existing words in the array at position &gt;= numWords should be zero.
+               /// 
+               /// </summary>
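+               /// <example>
+               /// Illustrative sketch: with word 0 == 1 and word 1 == 2, bits 0 and 65 are set
+               /// (65/64 selects word 1, 65%64 selects bit 1 within it):
+               /// <code>
+               /// OpenBitSet bitSet = new OpenBitSet(new long[] { 1L, 2L }, 2);
+               /// bool b0 = bitSet.Get(0);   // true
+               /// bool b65 = bitSet.Get(65); // true
+               /// </code>
+               /// </example>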
+               public OpenBitSet(long[] bits, int numWords)
+               {
+                       this.bits = bits;
+                       this.wlen = numWords;
+               }
+               
+               public override DocIdSetIterator Iterator()
+               {
+                       return new OpenBitSetIterator(bits, wlen);
+               }
+
+               /// <summary>This DocIdSet implementation is cacheable. </summary>
+               public override bool IsCacheable()
+               {
+                       return true;
+               }
+               
+               /// <summary>Returns the current capacity in bits (1 greater than the index of the last bit) </summary>
+               public virtual long Capacity()
+               {
+                       return bits.Length << 6;
+               }
+               
+               /// <summary> Returns the current capacity of this set.  Included for
+               /// compatibility.  This is *not* equal to <see cref="Cardinality"/>.
+               /// </summary>
+               public virtual long Size()
+               {
+                       return Capacity();
+               }
+               
+               /// <summary>Returns true if there are no set bits </summary>
+               public virtual bool IsEmpty()
+               {
+                       return Cardinality() == 0;
+               }
+               
+               /// <summary>Expert: returns the long[] storing the bits </summary>
+               public virtual long[] GetBits()
+               {
+                       return bits;
+               }
+               
+               /// <summary>Expert: sets a new long[] to use as the bit storage </summary>
+               public virtual void  SetBits(long[] bits)
+               {
+                       this.bits = bits;
+               }
+               
+               /// <summary>Expert: gets the number of longs in the array that are in use </summary>
+               public virtual int GetNumWords()
+               {
+                       return wlen;
+               }
+               
+               /// <summary>Expert: sets the number of longs in the array that are in use </summary>
+               public virtual void  SetNumWords(int nWords)
+               {
+                       this.wlen = nWords;
+               }
+               
+               
+               
+               /// <summary>Returns true or false for the specified bit index. </summary>
+               public virtual bool Get(int index)
+               {
+                       int i = index >> 6; // div 64
+                       // signed shift will keep a negative index and force an
+                       // array-index-out-of-bounds-exception, removing the need for an explicit check.
+                       if (i >= bits.Length)
+                               return false;
+                       
+                       int bit = index & 0x3f; // mod 64
+                       long bitmask = 1L << bit;
+                       return (bits[i] & bitmask) != 0;
+               }
+               
+               
+               /// <summary>Returns true or false for the specified bit index.
+               /// The index should be less than the OpenBitSet size
+               /// </summary>
+               public virtual bool FastGet(int index)
+               {
+                       int i = index >> 6; // div 64
+                       // signed shift will keep a negative index and force an
+                       // array-index-out-of-bounds-exception, removing the need for an explicit check.
+                       int bit = index & 0x3f; // mod 64
+                       long bitmask = 1L << bit;
+                       return (bits[i] & bitmask) != 0;
+               }
+               
+               
+               
+               /// <summary>Returns true or false for the specified bit index</summary>
+               public virtual bool Get(long index)
+               {
+                       int i = (int) (index >> 6); // div 64
+                       if (i >= bits.Length)
+                               return false;
+                       int bit = (int) index & 0x3f; // mod 64
+                       long bitmask = 1L << bit;
+                       return (bits[i] & bitmask) != 0;
+               }
+               
+               /// <summary>Returns true or false for the specified bit index.
+               /// The index should be less than the OpenBitSet size.
+               /// </summary>
+               public virtual bool FastGet(long index)
+               {
+                       int i = (int) (index >> 6); // div 64
+                       int bit = (int) index & 0x3f; // mod 64
+                       long bitmask = 1L << bit;
+                       return (bits[i] & bitmask) != 0;
+               }
+               
+               /*
+               // alternate implementation of get()
+               public boolean get1(int index) {
+               int i = index >> 6;                // div 64
+               int bit = index & 0x3f;            // mod 64
+               return ((bits[i]>>>bit) & 0x01) != 0;
+               // this does a long shift and a bittest (on x86) vs
+               // a long shift, and a long AND, (the test for zero is prob a no-op)
+               // testing on a P4 indicates this is slower than (bits[i] & bitmask) != 0;
+               }
+               */
+               
+               
+               /// <summary>returns 1 if the bit is set, 0 if not.
+               /// The index should be less than the OpenBitSet size
+               /// </summary>
+               public virtual int GetBit(int index)
+               {
+                       int i = index >> 6; // div 64
+                       int bit = index & 0x3f; // mod 64
+                       return ((int) ((ulong) bits[i] >> bit)) & 0x01;
+               }
+               
+               
+               /*
+               public boolean get2(int index) {
+               int word = index >> 6;            // div 64
+               int bit = index & 0x0000003f;     // mod 64
+               return (bits[word] << bit) < 0;   // hmmm, this would work if bit order were reversed
+               // we could right shift and check for parity bit, if it was available to us.
+               }
+               */
+               
+               /// <summary>sets a bit, expanding the set size if necessary </summary>
+               public virtual void  Set(long index)
+               {
+                       int wordNum = ExpandingWordNum(index);
+                       int bit = (int) index & 0x3f;
+                       long bitmask = 1L << bit;
+                       bits[wordNum] |= bitmask;
+               }
+               
+               
+               /// <summary>Sets the bit at the specified index.
+               /// The index should be less than the OpenBitSet size.
+               /// </summary>
+               public virtual void  FastSet(int index)
+               {
+                       int wordNum = index >> 6; // div 64
+                       int bit = index & 0x3f; // mod 64
+                       long bitmask = 1L << bit;
+                       bits[wordNum] |= bitmask;
+               }
+               
+               /// <summary>Sets the bit at the specified index.
+               /// The index should be less than the OpenBitSet size.
+               /// </summary>
+               public virtual void  FastSet(long index)
+               {
+                       int wordNum = (int) (index >> 6);
+                       int bit = (int) index & 0x3f;
+                       long bitmask = 1L << bit;
+                       bits[wordNum] |= bitmask;
+               }
+               
+               /// <summary>Sets a range of bits, expanding the set size if necessary
+               /// 
+               /// </summary>
+               /// <param name="startIndex">lower index
+               /// </param>
+               /// <param name="endIndex">one-past the last bit to set
+               /// </param>
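+               /// <example>
+               /// For instance (sketch): <c>Set(4, 8)</c> sets bits 4, 5, 6 and 7, but not bit 8.
+               /// </example>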
+               public virtual void  Set(long startIndex, long endIndex)
+               {
+                       if (endIndex <= startIndex)
+                               return ;
+                       
+                       int startWord = (int) (startIndex >> 6);
+                       
+                       // since endIndex is one past the end, this is index of the last
+                       // word to be changed.
+                       int endWord = ExpandingWordNum(endIndex - 1);
+                       
+                       long startmask = - 1L << (int) startIndex;
+                       long endmask = (long) (0xffffffffffffffffUL >> (int) - endIndex); // 64-(endIndex&0x3f) is the same as -endIndex due to wrap
+                       
+                       if (startWord == endWord)
+                       {
+                               bits[startWord] |= (startmask & endmask);
+                               return ;
+                       }
+                       
+                       bits[startWord] |= startmask;
+            for (int i = startWord + 1; i < endWord; i++)
+                bits[i] = -1L;
+                       bits[endWord] |= endmask;
+               }
+
+
+
+        protected internal virtual int ExpandingWordNum(long index)
+               {
+                       int wordNum = (int) (index >> 6);
+                       if (wordNum >= wlen)
+                       {
+                               EnsureCapacity(index + 1);
+                               wlen = wordNum + 1;
+                       }
+                       return wordNum;
+               }
+               
+               
+               /// <summary>clears a bit.
+               /// The index should be less than the OpenBitSet size.
+               /// </summary>
+               public virtual void  FastClear(int index)
+               {
+                       int wordNum = index >> 6;
+                       int bit = index & 0x03f;
+                       long bitmask = 1L << bit;
+                       bits[wordNum] &= ~ bitmask;
+                       // hmmm, it takes one more instruction to clear than it does to set... any
+                       // way to work around this?  If there were only 63 bits per word, we could
+                       // use a right shift of 10111111...111 in binary to position the 0 in the
+                       // correct place (using sign extension).
+                       // Could also use Long.rotateRight() or rotateLeft() *if* they were converted
+                       // by the JVM into a native instruction.
+                       // bits[word] &= Long.rotateLeft(0xfffffffe,bit);
+               }
+               
+               /// <summary>clears a bit.
+               /// The index should be less than the OpenBitSet size.
+               /// </summary>
+               public virtual void  FastClear(long index)
+               {
+                       int wordNum = (int) (index >> 6); // div 64
+                       int bit = (int) index & 0x3f; // mod 64
+                       long bitmask = 1L << bit;
+                       bits[wordNum] &= ~ bitmask;
+               }
+               
+               /// <summary>clears a bit, allowing access beyond the current set size without changing the size.</summary>
+               public virtual void  Clear(long index)
+               {
+                       int wordNum = (int) (index >> 6); // div 64
+                       if (wordNum >= wlen)
+                               return ;
+                       int bit = (int) index & 0x3f; // mod 64
+                       long bitmask = 1L << bit;
+                       bits[wordNum] &= ~ bitmask;
+               }
+               
+               /// <summary>Clears a range of bits.  Clearing past the end does not change the size of the set.
+               /// 
+               /// </summary>
+               /// <param name="startIndex">lower index
+               /// </param>
+               /// <param name="endIndex">one-past the last bit to clear
+               /// </param>
+               public virtual void  Clear(int startIndex, int endIndex)
+               {
+                       if (endIndex <= startIndex)
+                               return ;
+                       
+                       int startWord = (startIndex >> 6);
+                       if (startWord >= wlen)
+                               return ;
+                       
+                       // since endIndex is one past the end, this is index of the last
+                       // word to be changed.
+                       int endWord = ((endIndex - 1) >> 6);
+                       
+                       long startmask = - 1L << startIndex;
+                       long endmask = (long) (0xffffffffffffffffUL >> - endIndex); // 64-(endIndex&0x3f) is the same as -endIndex due to wrap
+                       
+                       // invert masks since we are clearing
+                       startmask = ~ startmask;
+                       endmask = ~ endmask;
+                       
+                       if (startWord == endWord)
+                       {
+                               bits[startWord] &= (startmask | endmask);
+                               return ;
+                       }
+                       
+                       bits[startWord] &= startmask;
+                       
+                       int middle = System.Math.Min(wlen, endWord);
+            for (int i = startWord + 1; i < middle; i++)
+                bits[i] = 0L;
+                       if (endWord < wlen)
+                       {
+                               bits[endWord] &= endmask;
+                       }
+               }
+               
+               
+               /// <summary>Clears a range of bits.  Clearing past the end does not change the size of the set.
+               /// 
+               /// </summary>
+               /// <param name="startIndex">lower index
+               /// </param>
+               /// <param name="endIndex">one-past the last bit to clear
+               /// </param>
+               public virtual void  Clear(long startIndex, long endIndex)
+               {
+                       if (endIndex <= startIndex)
+                               return ;
+                       
+                       int startWord = (int) (startIndex >> 6);
+                       if (startWord >= wlen)
+                               return ;
+                       
+                       // since endIndex is one past the end, this is index of the last
+                       // word to be changed.
+                       int endWord = (int) ((endIndex - 1) >> 6);
+                       
+                       long startmask = - 1L << (int) startIndex;
+                       long endmask = (long) (0xffffffffffffffffUL >> (int) - endIndex); // 64-(endIndex&0x3f) is the same as -endIndex due to wrap
+                       
+                       // invert masks since we are clearing
+                       startmask = ~ startmask;
+                       endmask = ~ endmask;
+                       
+                       if (startWord == endWord)
+                       {
+                               bits[startWord] &= (startmask | endmask);
+                               return ;
+                       }
+                       
+                       bits[startWord] &= startmask;
+                       
+                       int middle = System.Math.Min(wlen, endWord);
+            for (int i = startWord + 1; i < middle; i++)
+                bits[i] = 0L;
+                       if (endWord < wlen)
+                       {
+                               bits[endWord] &= endmask;
+                       }
+               }
+               
+               
+               
+               /// <summary>Sets a bit and returns the previous value.
+               /// The index should be less than the OpenBitSet size.
+               /// </summary>
+               public virtual bool GetAndSet(int index)
+               {
+                       int wordNum = index >> 6; // div 64
+                       int bit = index & 0x3f; // mod 64
+                       long bitmask = 1L << bit;
+                       bool val = (bits[wordNum] & bitmask) != 0;
+                       bits[wordNum] |= bitmask;
+                       return val;
+               }
+               
+               /// <summary>Sets a bit and returns the previous value.
+               /// The index should be less than the OpenBitSet size.
+               /// </summary>
+               public virtual bool GetAndSet(long index)
+               {
+                       int wordNum = (int) (index >> 6); // div 64
+                       int bit = (int) index & 0x3f; // mod 64
+                       long bitmask = 1L << bit;
+                       bool val = (bits[wordNum] & bitmask) != 0;
+                       bits[wordNum] |= bitmask;
+                       return val;
+               }
+               
+               /// <summary>flips a bit.
+               /// The index should be less than the OpenBitSet size.
+               /// </summary>
+               public virtual void  FastFlip(int index)
+               {
+                       int wordNum = index >> 6; // div 64
+                       int bit = index & 0x3f; // mod 64
+                       long bitmask = 1L << bit;
+                       bits[wordNum] ^= bitmask;
+               }
+               
+               /// <summary>flips a bit.
+               /// The index should be less than the OpenBitSet size.
+               /// </summary>
+               public virtual void  FastFlip(long index)
+               {
+                       int wordNum = (int) (index >> 6); // div 64
+                       int bit = (int) index & 0x3f; // mod 64
+                       long bitmask = 1L << bit;
+                       bits[wordNum] ^= bitmask;
+               }
+               
+               /// <summary>flips a bit, expanding the set size if necessary </summary>
+               public virtual void  Flip(long index)
+               {
+                       int wordNum = ExpandingWordNum(index);
+                       int bit = (int) index & 0x3f; // mod 64
+                       long bitmask = 1L << bit;
+                       bits[wordNum] ^= bitmask;
+               }
+               
+               /// <summary>flips a bit and returns the resulting bit value.
+               /// The index should be less than the OpenBitSet size.
+               /// </summary>
+               public virtual bool FlipAndGet(int index)
+               {
+                       int wordNum = index >> 6; // div 64
+                       int bit = index & 0x3f; // mod 64
+                       long bitmask = 1L << bit;
+                       bits[wordNum] ^= bitmask;
+                       return (bits[wordNum] & bitmask) != 0;
+               }
+               
+               /// <summary>flips a bit and returns the resulting bit value.
+               /// The index should be less than the OpenBitSet size.
+               /// </summary>
+               public virtual bool FlipAndGet(long index)
+               {
+                       int wordNum = (int) (index >> 6); // div 64
+                       int bit = (int) index & 0x3f; // mod 64
+                       long bitmask = 1L << bit;
+                       bits[wordNum] ^= bitmask;
+                       return (bits[wordNum] & bitmask) != 0;
+               }
+               
+               /// <summary>Flips a range of bits, expanding the set size if necessary
+               /// 
+               /// </summary>
+               /// <param name="startIndex">lower index
+               /// </param>
+               /// <param name="endIndex">one-past the last bit to flip
+               /// </param>
+               public virtual void  Flip(long startIndex, long endIndex)
+               {
+                       if (endIndex <= startIndex)
+                               return ;
+                       int startWord = (int) (startIndex >> 6);
+                       
+                       // since endIndex is one past the end, this is index of the last
+                       // word to be changed.
+                       int endWord = ExpandingWordNum(endIndex - 1);
+                       
+                       /*** Grrr, java shifting wraps around so -1L>>>64 == -1
+                       * for that reason, make sure not to use endmask if the bits to flip will
+                       * be zero in the last word (redefine endWord to be the last changed...)
+                       long startmask = -1L << (startIndex & 0x3f);     // example: 11111...111000
+                       long endmask = -1L >>> (64-(endIndex & 0x3f));   // example: 00111...111111
+                       ***/
+                       
+                       long startmask = - 1L << (int) startIndex;
+                       long endmask = (long) (0xffffffffffffffffUL >> (int) - endIndex); // 64-(endIndex&0x3f) is the same as -endIndex due to wrap
+                       
+                       if (startWord == endWord)
+                       {
+                               bits[startWord] ^= (startmask & endmask);
+                               return ;
+                       }
+                       
+                       bits[startWord] ^= startmask;
+                       
+                       for (int i = startWord + 1; i < endWord; i++)
+                       {
+                               bits[i] = ~ bits[i];
+                       }
+                       
+                       bits[endWord] ^= endmask;
+               }
+               
+               
+               /*
+               public static int pop(long v0, long v1, long v2, long v3) {
+               // derived from pop_array by setting last four elems to 0.
+               // exchanges one pop() call for 10 elementary operations
+               // saving about 7 instructions... is there a better way?
+               long twosA=v0 & v1;
+               long ones=v0^v1;
+               
+               long u2=ones^v2;
+               long twosB =(ones&v2)|(u2&v3);
+               ones=u2^v3;
+               
+               long fours=(twosA&twosB);
+               long twos=twosA^twosB;
+               
+               return (pop(fours)<<2)
+               + (pop(twos)<<1)
+               + pop(ones);
+               
+               }
+               */
+               
+               
+               /// <returns> the number of set bits 
+               /// </returns>
+               public virtual long Cardinality()
+               {
+                       return BitUtil.Pop_array(bits, 0, wlen);
+               }
+               
+               /// <summary>Returns the popcount or cardinality of the intersection of the two sets.
+               /// Neither set is modified.
+               /// </summary>
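+               /// <example>
+               /// Illustrative sketch: if <c>a</c> has bits {1, 2, 3} set and <c>b</c> has
+               /// bits {2, 3, 4} set, <c>IntersectionCount(a, b)</c> is 2 and
+               /// <c>UnionCount(a, b)</c> is 4.
+               /// </example>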
+               public static long IntersectionCount(OpenBitSet a, OpenBitSet b)
+               {
+                       return BitUtil.Pop_intersect(a.bits, b.bits, 0, System.Math.Min(a.wlen, b.wlen));
+               }
+               
+               /// <summary>Returns the popcount or cardinality of the union of the two sets.
+               /// Neither set is modified.
+               /// </summary>
+               public static long UnionCount(OpenBitSet a, OpenBitSet b)
+               {
+                       long tot = BitUtil.Pop_union(a.bits, b.bits, 0, System.Math.Min(a.wlen, b.wlen));
+                       if (a.wlen < b.wlen)
+                       {
+                               tot += BitUtil.Pop_array(b.bits, a.wlen, b.wlen - a.wlen);
+                       }
+                       else if (a.wlen > b.wlen)
+                       {
+                               tot += BitUtil.Pop_array(a.bits, b.wlen, a.wlen - b.wlen);
+                       }
+                       return tot;
+               }
+               
+               /// <summary>Returns the popcount or cardinality of "a and not b"
+               /// or "intersection(a, not(b))".
+               /// Neither set is modified.
+               /// </summary>
+               public static long AndNotCount(OpenBitSet a, OpenBitSet b)
+               {
+                       long tot = BitUtil.Pop_andnot(a.bits, b.bits, 0, System.Math.Min(a.wlen, b.wlen));
+                       if (a.wlen > b.wlen)
+                       {
+                               tot += BitUtil.Pop_array(a.bits, b.wlen, a.wlen - b.wlen);
+                       }
+                       return tot;
+               }
+               
+               /// <summary>Returns the popcount or cardinality of the exclusive-or of the two sets.
+               /// Neither set is modified.
+               /// </summary>
+               public static long XorCount(OpenBitSet a, OpenBitSet b)
+               {
+                       long tot = BitUtil.Pop_xor(a.bits, b.bits, 0, System.Math.Min(a.wlen, b.wlen));
+                       if (a.wlen < b.wlen)
+                       {
+                               tot += BitUtil.Pop_array(b.bits, a.wlen, b.wlen - a.wlen);
+                       }
+                       else if (a.wlen > b.wlen)
+                       {
+                               tot += BitUtil.Pop_array(a.bits, b.wlen, a.wlen - b.wlen);
+                       }
+                       return tot;
+               }
+               
+               
+               /// <summary>Returns the index of the first set bit starting at the index specified.
+               /// -1 is returned if there are no more set bits.
+               /// </summary>
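+               /// <example>
+               /// The usual iteration idiom (a sketch, mirroring java.util.BitSet usage;
+               /// <c>myBits</c> is an assumed caller-side variable):
+               /// <code>
+               /// for (int i = myBits.NextSetBit(0); i &gt;= 0; i = myBits.NextSetBit(i + 1))
+               /// {
+               ///     // operate on index i here
+               /// }
+               /// </code>
+               /// </example>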
+               public virtual int NextSetBit(int index)
+               {
+                       int i = index >> 6;
+                       if (i >= wlen)
+                               return - 1;
+                       int subIndex = index & 0x3f; // index within the word
+                       long word = bits[i] >> subIndex; // skip all the bits to the right of index
+                       
+                       if (word != 0)
+                       {
+                               return (i << 6) + subIndex + BitUtil.Ntz(word);
+                       }
+                       
+                       while (++i < wlen)
+                       {
+                               word = bits[i];
+                               if (word != 0)
+                                       return (i << 6) + BitUtil.Ntz(word);
+                       }
+                       
+                       return - 1;
+               }
+               
+               /// <summary>Returns the index of the first set bit starting at the index specified.
+               /// -1 is returned if there are no more set bits.
+               /// </summary>
+               public virtual long NextSetBit(long index)
+               {
+                       int i = (int) (index >> 6);
+                       if (i >= wlen)
+                               return - 1;
+                       int subIndex = (int) index & 0x3f; // index within the word
+                       long word = (long) ((ulong) bits[i] >> subIndex); // skip all the bits to the right of index
+                       
+                       if (word != 0)
+                       {
+                               return (((long) i) << 6) + (subIndex + BitUtil.Ntz(word));
+                       }
+                       
+                       while (++i < wlen)
+                       {
+                               word = bits[i];
+                               if (word != 0)
+                                       return (((long) i) << 6) + BitUtil.Ntz(word);
+                       }
+                       
+                       return - 1;
+               }
+               
+               
+               
+               
+               public virtual System.Object Clone()
+               {
+                       try
+                       {
+                OpenBitSet obs = new OpenBitSet((long[]) bits.Clone(), wlen);
+                               //obs.bits = new long[obs.bits.Length];
+                               //obs.bits.CopyTo(obs.bits, 0); // hopefully an array clone is as fast as (or faster than) an arraycopy
+                               return obs;
+                       }
+                       catch (System.Exception e)
+                       {
+                               throw new System.SystemException(e.Message, e);
+                       }
+               }
+               
+               /// <summary>this = this AND other </summary>
+               public virtual void  Intersect(OpenBitSet other)
+               {
+                       int newLen = System.Math.Min(this.wlen, other.wlen);
+                       long[] thisArr = this.bits;
+                       long[] otherArr = other.bits;
+                       // testing against zero can be more efficient
+                       int pos = newLen;
+                       while (--pos >= 0)
+                       {
+                               thisArr[pos] &= otherArr[pos];
+                       }
+                       if (this.wlen > newLen)
+                       {
+                               // fill zeros from the new shorter length to the old length
+                for (int i = newLen; i < this.wlen; i++)
+                    bits[i] = 0L;
+                       }
+                       this.wlen = newLen;
+               }
+               
+               /// <summary>this = this OR other </summary>
+               public virtual void  Union(OpenBitSet other)
+               {
+                       int newLen = System.Math.Max(wlen, other.wlen);
+                       EnsureCapacityWords(newLen);
+                       
+                       long[] thisArr = this.bits;
+                       long[] otherArr = other.bits;
+                       int pos = System.Math.Min(wlen, other.wlen);
+                       while (--pos >= 0)
+                       {
+                               thisArr[pos] |= otherArr[pos];
+                       }
+                       if (this.wlen < newLen)
+                       {
+                               Array.Copy(otherArr, this.wlen, thisArr, this.wlen, newLen - this.wlen);
+                       }
+                       this.wlen = newLen;
+               }
+               
+               
+               /// <summary>Remove all elements set in other. this = this AND_NOT other </summary>
+               public virtual void  Remove(OpenBitSet other)
+               {
+                       int idx = System.Math.Min(wlen, other.wlen);
+                       long[] thisArr = this.bits;
+                       long[] otherArr = other.bits;
+                       while (--idx >= 0)
+                       {
+                               thisArr[idx] &= ~ otherArr[idx];
+                       }
+               }
+               
+               /// <summary>this = this XOR other </summary>
+               public virtual void  Xor(OpenBitSet other)
+               {
+                       int newLen = System.Math.Max(wlen, other.wlen);
+                       EnsureCapacityWords(newLen);
+                       
+                       long[] thisArr = this.bits;
+                       long[] otherArr = other.bits;
+                       int pos = System.Math.Min(wlen, other.wlen);
+                       while (--pos >= 0)
+                       {
+                               thisArr[pos] ^= otherArr[pos];
+                       }
+                       if (this.wlen < newLen)
+                       {
+                               Array.Copy(otherArr, this.wlen, thisArr, this.wlen, newLen - this.wlen);
+                       }
+                       this.wlen = newLen;
+               }
+               
+               
+               // some BitSet compatibility methods
+               
+               /// <summary>See <see cref="Intersect"/>. </summary>
+               public virtual void  And(OpenBitSet other)
+               {
+                       Intersect(other);
+               }
+               
+               /// <summary>See <see cref="Union"/>. </summary>
+               public virtual void  Or(OpenBitSet other)
+               {
+                       Union(other);
+               }
+               
+               /// <summary>See <see cref="Remove"/>. </summary>
+               public virtual void  AndNot(OpenBitSet other)
+               {
+                       Remove(other);
+               }
+               
+               /// <summary>returns true if the sets have any elements in common </summary>
+               public virtual bool Intersects(OpenBitSet other)
+               {
+                       int pos = System.Math.Min(this.wlen, other.wlen);
+                       long[] thisArr = this.bits;
+                       long[] otherArr = other.bits;
+                       while (--pos >= 0)
+                       {
+                               if ((thisArr[pos] & otherArr[pos]) != 0)
+                                       return true;
+                       }
+                       return false;
+               }
+               
+               
+               
+               /// <summary>Expand the long[] with the size given as a number of words (64 bit longs).
+               /// GetNumWords() is unchanged by this call.
+               /// </summary>
+               public virtual void  EnsureCapacityWords(int numWords)
+               {
+                       if (bits.Length < numWords)
+                       {
+                               bits = ArrayUtil.Grow(bits, numWords);
+                       }
+               }
+               
+               /// <summary>Ensure that the long[] is big enough to hold numBits, expanding it if necessary.
+               /// GetNumWords() is unchanged by this call.
+               /// </summary>
+               public virtual void  EnsureCapacity(long numBits)
+               {
+                       EnsureCapacityWords(Bits2words(numBits));
+               }
+               
+               /// <summary>Lowers numWords, the number of words in use,
+               /// by checking for trailing zero words.
+               /// </summary>
+               public virtual void  TrimTrailingZeros()
+               {
+                       int idx = wlen - 1;
+                       while (idx >= 0 && bits[idx] == 0)
+                               idx--;
+                       wlen = idx + 1;
+               }
+               
+               /// <summary>returns the number of 64 bit words it would take to hold numBits </summary>
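+               /// <example>For instance, <c>Bits2words(64)</c> is 1 and <c>Bits2words(65)</c> is 2.</example>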
+               public static int Bits2words(long numBits)
+               {
+                       return (int) ((((numBits - 1) >> 6)) + 1);
+               }
+               
+               
+               /// <summary>returns true if both sets have the same bits set </summary>
+               public  override bool Equals(System.Object o)
+               {
+                       if (this == o)
+                               return true;
+                       if (!(o is OpenBitSet))
+                               return false;
+                       OpenBitSet a;
+                       OpenBitSet b = (OpenBitSet) o;
+                       // make a the larger set.
+                       if (b.wlen > this.wlen)
+                       {
+                               a = b; b = this;
+                       }
+                       else
+                       {
+                               a = this;
+                       }
+                       
+                       // check for any set bits out of the range of b
+                       for (int i = a.wlen - 1; i >= b.wlen; i--)
+                       {
+                               if (a.bits[i] != 0)
+                                       return false;
+                       }
+                       
+                       for (int i = b.wlen - 1; i >= 0; i--)
+                       {
+                               if (a.bits[i] != b.bits[i])
+                                       return false;
+                       }
+                       
+                       return true;
+               }
+
+        public override int GetHashCode()
+        {
+            // Start with a zero hash and use a mix that results in zero if the input is zero.
+            // This effectively truncates trailing zeros without an explicit check.
+            long h = 0;
+            for (int i = bits.Length; --i >= 0; )
+            {
+                h ^= bits[i];
+                h = (h << 1) | (SupportClass.Number.URShift(h, 63)); // rotate left
+            }
+            // fold leftmost bits into right and add a constant to prevent
+            // empty sets from returning 0, which is too common.
+            return (int)(((h >> 32) ^ h) + 0x98761234);
+        }
+
+               
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/OpenBitSetDISI.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/OpenBitSetDISI.cs
new file mode 100644 (file)
index 0000000..5a60d1b
--- /dev/null
@@ -0,0 +1,112 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using DocIdSetIterator = Mono.Lucene.Net.Search.DocIdSetIterator;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       [Serializable]
+       public class OpenBitSetDISI:OpenBitSet
+       {
+               
+               /// <summary>Construct an OpenBitSetDISI with its bits set
+               /// from the doc ids of the given DocIdSetIterator.
+               /// Also give a maximum size one larger than the largest doc id for which a
+               /// bit may ever be set on this OpenBitSetDISI.
+               /// </summary>
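+               /// <example>
+               /// A hedged sketch (the <c>reader</c> and <c>iterator</c> names are assumed,
+               /// not part of these sources):
+               /// <code>
+               /// OpenBitSetDISI filterBits = new OpenBitSetDISI(iterator, reader.MaxDoc());
+               /// </code>
+               /// </example>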
+               public OpenBitSetDISI(DocIdSetIterator disi, int maxSize):base(maxSize)
+               {
+                       InPlaceOr(disi);
+               }
+               
+               /// <summary>Construct an OpenBitSetDISI with no bits set, and a given maximum size
+               /// one larger than the largest doc id for which a bit may ever be set
+               /// on this OpenBitSetDISI.
+               /// </summary>
+               public OpenBitSetDISI(int maxSize):base(maxSize)
+               {
+               }
+               
+               /// <summary> Perform an inplace OR with the doc ids from a given DocIdSetIterator,
+               /// setting the bit for each such doc id.
+               /// These doc ids should be smaller than the maximum size passed to the
+               /// constructor.
+               /// </summary>
+               public virtual void  InPlaceOr(DocIdSetIterator disi)
+               {
+                       int doc;
+                       long size = Size();
+                       while ((doc = disi.NextDoc()) < size)
+                       {
+                               FastSet(doc);
+                       }
+               }
+               
+               /// <summary> Perform an inplace AND with the doc ids from a given DocIdSetIterator,
+               /// leaving only the bits set for which the doc ids are in common.
+               /// These doc ids should be smaller than the maximum size passed to the
+               /// constructor.
+               /// </summary>
+               public virtual void  InPlaceAnd(DocIdSetIterator disi)
+               {
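+                       // Walk the set bits in this set; for each one, advance the iterator and
+                       // clear every bit in the gap the iterator skipped over, leaving only
+                       // the doc ids present in both.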
+                       int bitSetDoc = NextSetBit(0);
+                       int disiDoc;
+                       while (bitSetDoc != - 1 && (disiDoc = disi.Advance(bitSetDoc)) != DocIdSetIterator.NO_MORE_DOCS)
+                       {
+                               Clear(bitSetDoc, disiDoc);
+                               bitSetDoc = NextSetBit(disiDoc + 1);
+                       }
+                       if (bitSetDoc != - 1)
+                       {
+                               Clear(bitSetDoc, Size());
+                       }
+               }
+               
+               /// <summary> Perform an inplace NOT with the doc ids from a given DocIdSetIterator,
+               /// clearing all the bits for each such doc id.
+               /// These doc ids should be smaller than the maximum size passed to the
+               /// constructor.
+               /// </summary>
+               public virtual void  InPlaceNot(DocIdSetIterator disi)
+               {
+                       int doc;
+                       long size = Size();
+                       while ((doc = disi.NextDoc()) < size)
+                       {
+                               FastClear(doc);
+                       }
+               }
+               
+               /// <summary> Perform an inplace XOR with the doc ids from a given DocIdSetIterator,
+               /// flipping all the bits for each such doc id.
+               /// These doc ids should be smaller than the maximum size passed to the
+               /// constructor.
+               /// </summary>
+               public virtual void  InPlaceXor(DocIdSetIterator disi)
+               {
+                       int doc;
+                       long size = Size();
+                       while ((doc = disi.NextDoc()) < size)
+                       {
+                               FastFlip(doc);
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/OpenBitSetIterator.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/OpenBitSetIterator.cs
new file mode 100644 (file)
index 0000000..73f5106
--- /dev/null
@@ -0,0 +1,217 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using DocIdSetIterator = Mono.Lucene.Net.Search.DocIdSetIterator;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary>An iterator to iterate over set bits in an OpenBitSet.
+       /// This is faster than nextSetBit() for iterating over the complete set of bits,
+       /// especially when the density of the bits set is high.
+       /// 
+       /// </summary>
+       /// <version>  $Id$
+       /// </version>
+       public class OpenBitSetIterator:DocIdSetIterator
+       {
+               
+               // The General Idea: instead of having an array per byte that has
+               // the offsets of the next set bit, that array could be
+               // packed inside a 32 bit integer (8 4 bit numbers).  That
+               // should be faster than accessing an array for each index, and
+               // the total array size is kept smaller: 256 * sizeof(int) = 1KB
+               protected internal static readonly uint[] bitlist = new uint[]{0x0, 0x1, 0x2, 0x21, 0x3, 0x31, 0x32, 0x321, 0x4, 0x41, 0x42, 0x421, 0x43, 0x431, 0x432, 0x4321, 0x5, 0x51, 0x52, 0x521, 0x53, 0x531, 0x532, 0x5321, 0x54, 0x541, 0x542, 0x5421, 0x543, 0x5431, 0x5432, 0x54321, 0x6, 0x61, 0x62, 0x621, 0x63, 0x631, 0x632, 0x6321, 0x64, 0x641, 0x642, 0x6421, 0x643, 0x6431, 0x6432, 0x64321, 0x65, 0x651, 0x652, 0x6521, 0x653, 0x6531, 0x6532, 0x65321, 0x654, 0x6541, 0x6542, 0x65421, 0x6543, 0x65431, 0x65432, 0x654321, 0x7, 0x71, 0x72, 0x721, 0x73, 0x731, 0x732, 0x7321, 0x74, 0x741, 0x742, 0x7421, 0x743, 0x7431, 0x7432, 0x74321, 0x75, 0x751, 0x752, 0x7521, 0x753, 0x7531, 0x7532, 0x75321, 0x754, 0x7541, 0x7542, 0x75421, 0x7543, 0x75431, 0x75432, 0x754321, 0x76, 0x761, 0x762, 0x7621, 0x763, 0x7631, 0x7632, 0x76321, 0x764, 0x7641, 0x7642, 0x76421, 0x7643, 0x76431, 0x76432, 0x764321, 0x765, 0x7651, 0x7652, 0x76521, 0x7653, 0x76531, 0x76532, 0x765321, 0x7654, 0x76541, 0x76542, 0x765421, 0x76543, 0x765431, 0x765432, 0x7654321, 0x8, 0x81, 0x82, 0x821, 0x83, 0x831, 0x832, 0x8321, 0x84, 0x841, 0x842, 0x8421, 0x843, 0x8431, 0x8432, 0x84321, 0x85, 0x851, 0x852, 0x8521, 0x853, 0x8531, 0x8532, 0x85321, 0x854, 0x8541, 0x8542, 0x85421, 0x8543, 0x85431, 0x85432, 0x854321, 0x86, 0x861, 0x862, 0x8621, 0x863, 0x8631, 0x8632, 0x86321, 0x864, 0x8641, 0x8642, 0x86421, 0x8643, 0x86431, 0x86432, 0x864321, 0x865, 0x8651, 0x8652, 0x86521, 0x8653, 0x86531, 0x86532, 0x865321, 0x8654, 0x86541, 0x86542, 0x865421, 0x86543, 0x865431, 0x865432, 0x8654321, 0x87, 0x871, 0x872, 0x8721, 0x873, 0x8731, 0x8732, 0x87321, 0x874, 0x8741, 0x8742, 0x87421, 0x8743, 0x87431, 0x87432, 0x874321, 0x875, 0x8751, 0x8752, 0x87521, 0x8753, 0x87531, 0x87532, 0x875321, 0x8754, 0x87541, 0x87542, 0x875421, 0x87543, 0x875431, 0x875432, 0x8754321, 0x876, 0x8761, 0x8762, 0x87621, 0x8763, 0x87631, 0x87632, 0x876321, 0x8764, 0x87641, 0x87642, 0x876421, 0x87643, 0x876431, 0x876432, 0x8764321, 0x8765, 0x87651, 0x87652, 0x876521, 0x87653, 0x876531, 0x876532, 0x8765321, 0x87654, 
+                       0x876541, 0x876542, 0x8765421, 0x876543, 0x8765431, 0x8765432, 0x87654321};
+               /// <summary>** the python code that generated bitlist
+               /// def bits2int(val):
+               ///     arr = 0
+               ///     for shift in range(8, 0, -1):
+               ///         if val &amp; 0x80:
+               ///             arr = (arr &lt;&lt; 4) | shift
+               ///         val = val &lt;&lt; 1
+               ///     return arr
+               /// def int_table():
+               ///     tbl = [ hex(bits2int(val)).strip('L') for val in range(256) ]
+               ///     return ','.join(tbl)
+               /// ****
+               /// </summary>
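+               
+               // Worked example: the byte 0x05 has bits 0 and 2 set, so bitlist[5] == 0x31,
+               // i.e. the 1-based positions 3 and 1 packed four bits apiece, lowest first.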
+               
+               // hmmm, what about an iterator that finds zeros though,
+               // or a reverse iterator... should they be separate classes
+               // for efficiency, or have a common root interface?  (or
+               // maybe both?  could ask for a SetBitsIterator, etc...)
+               
+               private long[] arr;
+               private int words;
+               private int i = - 1;
+               private long word;
+               private int wordShift;
+               private int indexArray;
+               private int curDocId = - 1;
+               
+               public OpenBitSetIterator(OpenBitSet obs):this(obs.GetBits(), obs.GetNumWords())
+               {
+               }
+               
+               public OpenBitSetIterator(long[] bits, int numWords)
+               {
+                       arr = bits;
+                       words = numWords;
+               }
+               
+               // 64 bit shifts
+               private void  Shift()
+               {
+                       if ((int) word == 0)
+                       {
+                               wordShift += 32; word = (long) ((ulong) word >> 32);
+                       }
+                       if ((word & 0x0000FFFF) == 0)
+                       {
+                               wordShift += 16; word = (long) ((ulong) word >> 16);
+                       }
+                       if ((word & 0x000000FF) == 0)
+                       {
+                               wordShift += 8; word = (long) ((ulong) word >> 8);
+                       }
+                       indexArray = (int) bitlist[word & 0xff];
+               }
+               
+               /// <summary>** alternate shift implementations
+               /// // 32 bit shifts, but a long shift needed at the end
+               /// private void shift2() {
+               /// int y = (int)word;
+               /// if (y==0) {wordShift +=32; y = (int)(word >>>32); }
+               /// if ((y & 0x0000FFFF) == 0) { wordShift +=16; y>>>=16; }
+               /// if ((y & 0x000000FF) == 0) { wordShift +=8; y>>>=8; }
+               /// indexArray = bitlist[y & 0xff];
+               /// word >>>= (wordShift +1);
+               /// }
+               /// private void shift3() {
+               /// int lower = (int)word;
+               /// int lowByte = lower & 0xff;
+               /// if (lowByte != 0) {
+               /// indexArray=bitlist[lowByte];
+               /// return;
+               /// }
+               /// shift();
+               /// }
+               /// ****
+               /// </summary>
+               
+               /// <deprecated> use <see cref="NextDoc()"/> instead. 
+               /// </deprecated>
+        [Obsolete("use NextDoc() instead.")]
+               public override bool Next()
+               {
+                       return NextDoc() != NO_MORE_DOCS;
+               }
+               
+               public override int NextDoc()
+               {
+                       if (indexArray == 0)
+                       {
+                               if (word != 0)
+                               {
+                                       word = (long) ((ulong) word >> 8);
+                                       wordShift += 8;
+                               }
+                               
+                               while (word == 0)
+                               {
+                                       if (++i >= words)
+                                       {
+                                               return curDocId = NO_MORE_DOCS;
+                                       }
+                                       word = arr[i];
+                                       wordShift = - 1; // loop invariant code motion should move this
+                               }
+                               
+                               // after the first time, should I go with a linear search, or
+                               // stick with the binary search in shift?
+                               Shift();
+                       }
+                       
+                       int bitIndex = (indexArray & 0x0f) + wordShift;
+                       indexArray = (int) ((uint) indexArray >> 4);
+                       // should i<<6 be cached as a separate variable?
+                       // it would only save one cycle in the best circumstances.
+                       return curDocId = (i << 6) + bitIndex;
+               }
+               
+               /// <deprecated> use {@link #Advance(int)} instead. 
+               /// </deprecated>
+        [Obsolete("use Advance(int) instead.")]
+               public override bool SkipTo(int target)
+               {
+                       return Advance(target) != NO_MORE_DOCS;
+               }
+               
+               public override int Advance(int target)
+               {
+                       indexArray = 0;
+                       i = target >> 6;
+                       if (i >= words)
+                       {
+				word = 0; // set up so NextDoc() will also return NO_MORE_DOCS
+                               return curDocId = NO_MORE_DOCS;
+                       }
+                       wordShift = target & 0x3f;
+                       word = (long) ((ulong) arr[i] >> wordShift);
+                       if (word != 0)
+                       {
+                               wordShift--; // compensate for 1 based arrIndex
+                       }
+                       else
+                       {
+                               while (word == 0)
+                               {
+                                       if (++i >= words)
+                                       {
+                                               return curDocId = NO_MORE_DOCS;
+                                       }
+                                       word = arr[i];
+                               }
+                               wordShift = - 1;
+                       }
+                       
+                       Shift();
+                       
+                       int bitIndex = (indexArray & 0x0f) + wordShift;
+                       indexArray = (int) ((uint) indexArray >> 4);
+                       // should i<<6 be cached as a separate variable?
+                       // it would only save one cycle in the best circumstances.
+                       return curDocId = (i << 6) + bitIndex;
+               }
+               
+               /// <deprecated> use {@link #DocID()} instead. 
+               /// </deprecated>
+        [Obsolete("use DocID() instead.")]
+               public override int Doc()
+               {
+                       return curDocId;
+               }
+               
+               public override int DocID()
+               {
+                       return curDocId;
+               }
+       }
+}
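
A minimal standalone sketch (not part of this patch) of how the nibble-packed bitlist table is consumed: each entry stores the 1-based positions of the set bits of an 8-bit value, one position per nibble with the lowest position in the lowest nibble, which is exactly what NextDoc() unpacks via "indexArray & 0x0f" followed by a right shift of 4. Bits2Int below simply re-implements the quoted python generator.

    using System;

    class BitlistDemo
    {
        // Re-implementation of the python bits2int() quoted above: scan the
        // byte from its highest bit down, appending the 1-based position of
        // each set bit as one nibble, so the lowest set bit lands in the
        // lowest nibble.
        static uint Bits2Int(int val)
        {
            uint arr = 0;
            for (int shift = 8; shift > 0; shift--)
            {
                if ((val & 0x80) != 0)
                    arr = (arr << 4) | (uint) shift;
                val <<= 1;
            }
            return arr;
        }

        static void Main()
        {
            uint packed = Bits2Int(0xA4); // bits 2, 5 and 7 set -> 0x863
            // Unpack like NextDoc(): take the low nibble, then shift right by 4.
            while (packed != 0)
            {
                Console.WriteLine("set bit at index {0}", (packed & 0x0f) - 1);
                packed >>= 4;
            }
        }
    }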
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Package.html b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Package.html
new file mode 100644 (file)
index 0000000..5d96bed
--- /dev/null
@@ -0,0 +1,25 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">\r
+<!--\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements.  See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License.  You may obtain a copy of the License at\r
+\r
+     http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+-->\r
+<html>\r
+<head>\r
+   <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">\r
+</head>\r
+<body>\r
+Some utility classes.\r
+</body>\r
+</html>\r
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Parameter.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Parameter.cs
new file mode 100644 (file)
index 0000000..804bf46
--- /dev/null
@@ -0,0 +1,82 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using System.Collections.Generic;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> A serializable Enum class.</summary>
+       [Serializable]
+    public abstract class Parameter 
+       {
+        internal static Dictionary<string, Parameter> allParameters = new Dictionary<string, Parameter>();
+               
+               private System.String name;
+               
+               private Parameter()
+               {
+                       // typesafe enum pattern, no public constructor
+               }
+               
+               protected internal Parameter(System.String name)
+               {
+                       // typesafe enum pattern, no public constructor
+                       this.name = name;
+                       string key = MakeKey(name);
+                       
+                       if (allParameters.ContainsKey(key))
+                               throw new System.ArgumentException("Parameter name " + key + " already used!");
+                       
+                       allParameters[key] = this;
+               }
+               
+               private string MakeKey(System.String name)
+               {
+                       return GetType() + " " + name;
+               }
+               
+               public override string ToString()
+               {
+                       return name;
+               }
+               
+               /// <summary> Resolves the deserialized instance to the local reference for accurate
+               /// equals() and == comparisons.
+               /// 
+               /// </summary>
+               /// <returns> a reference to Parameter as resolved in the local VM
+               /// </returns>
+               /// <throws>  ObjectStreamException </throws>
+        //protected internal virtual System.Object ReadResolve()
+        //{
+        //    System.Object par = allParameters[MakeKey(name)];
+                       
+        //    if (par == null)
+        //        throw new System.IO.IOException("Unknown parameter value: " + name);
+                       
+        //    return par;
+        //}
+
+        public override bool Equals(object obj)
+        {
+            if (obj == null || obj.GetType() != this.GetType()) return false;
+            return this.name.Equals(((Parameter) obj).name);
+        }
+
+        public override int GetHashCode()
+        {
+            // matches Equals(): equal parameters have equal names
+            return name.GetHashCode();
+        }
+       }
+}
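
A small sketch (assumed, not part of this patch) of how the typesafe-enum pattern in Parameter is meant to be consumed: a subclass exposes a fixed set of named singletons, and each call to the protected constructor registers the instance in allParameters, throwing an ArgumentException if the type/name pair is reused. The ExampleMode type and its AND/OR members are hypothetical.

    using System;
    using Mono.Lucene.Net.Util;

    [Serializable]
    public sealed class ExampleMode : Parameter
    {
        // Hypothetical members for illustration only; each base(name) call
        // registers the singleton under "GetType() + ' ' + name".
        public static readonly ExampleMode AND = new ExampleMode("AND");
        public static readonly ExampleMode OR = new ExampleMode("OR");

        private ExampleMode(string name) : base(name) { }

        // ExampleMode.AND.ToString() returns "AND"; comparisons can use ==
        // because only one instance per name ever exists.
    }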
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/PriorityQueue.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/PriorityQueue.cs
new file mode 100644 (file)
index 0000000..be30111
--- /dev/null
@@ -0,0 +1,340 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+	/// <summary>A PriorityQueue maintains a partial ordering of its elements such that the
+	/// least element can always be found in constant time. Put() and Pop()
+	/// require log(size) time.
+       /// 
+       /// <p/><b>NOTE</b>: This class pre-allocates a full array of
+       /// length <code>maxSize+1</code>, in {@link #initialize}.
+       /// 
+       /// </summary>
+       public abstract class PriorityQueue
+       {
+               private int size;
+               private int maxSize;
+               protected internal System.Object[] heap;
+               
+               /// <summary>Determines the ordering of objects in this priority queue.  Subclasses
+               /// must define this one method. 
+               /// </summary>
+               public abstract bool LessThan(System.Object a, System.Object b);
+               
+               /// <summary> This method can be overridden by extending classes to return a sentinel
+               /// object which will be used by {@link #Initialize(int)} to fill the queue, so
+               /// that the code which uses that queue can always assume it's full and only
+               /// change the top without attempting to insert any new object.<br/>
+               /// 
+               /// Those sentinel values should always compare worse than any non-sentinel
+               /// value (i.e., {@link #LessThan(Object, Object)} should always favor the
+               /// non-sentinel values).<br/>
+               /// 
+		/// By default, this method returns null, which means the queue will not be
+		/// filled with sentinel values. Otherwise, the value returned will be used to
+		/// pre-populate the queue with sentinel values.<br/>
+               /// 
+               /// If this method is extended to return a non-null value, then the following
+               /// usage pattern is recommended:
+               /// 
+               /// <pre>
+               /// // extends getSentinelObject() to return a non-null value.
+               /// PriorityQueue pq = new MyQueue(numHits);
+               /// // save the 'top' element, which is guaranteed to not be null.
+               /// MyObject pqTop = (MyObject) pq.top();
+               /// &lt;...&gt;
+               /// // now in order to add a new element, which is 'better' than top (after 
+               /// // you've verified it is better), it is as simple as:
+		/// pqTop.change();
+               /// pqTop = pq.updateTop();
+               /// </pre>
+               /// 
+               /// <b>NOTE:</b> if this method returns a non-null value, it will be called by
+               /// {@link #Initialize(int)} {@link #Size()} times, relying on a new object to
+               /// be returned and will not check if it's null again. Therefore you should
+               /// ensure any call to this method creates a new instance and behaves
+               /// consistently, e.g., it cannot return null if it previously returned
+               /// non-null.
+               /// 
+               /// </summary>
+               /// <returns> the sentinel object to use to pre-populate the queue, or null if
+               /// sentinel objects are not supported.
+               /// </returns>
+               protected internal virtual System.Object GetSentinelObject()
+               {
+                       return null;
+               }
+               
+               /// <summary>Subclass constructors must call this. </summary>
+               protected internal void  Initialize(int maxSize)
+               {
+                       size = 0;
+                       int heapSize;
+            if (0 == maxSize)
+                // We allocate 1 extra to avoid if statement in top()
+                heapSize = 2;
+            else
+            {
+                if (maxSize == Int32.MaxValue)
+                {
+                    // Don't wrap heapSize to -1, in this case, which
+                    // causes a confusing NegativeArraySizeException.
+                    // Note that very likely this will simply then hit
+                    // an OOME, but at least that's more indicative to
+                    // caller that this values is too big.  We don't +1
+                    // in this case, but it's very unlikely in practice
+                    // one will actually insert this many objects into
+                    // the PQ:
+                    heapSize = Int32.MaxValue;
+                }
+                else
+                {
+                    // NOTE: we add +1 because all access to heap is
+                    // 1-based not 0-based.  heap[0] is unused.
+                    heapSize = maxSize + 1;
+                }
+            }
+                       heap = new System.Object[heapSize];
+                       this.maxSize = maxSize;
+                       
+                       // If sentinel objects are supported, populate the queue with them
+                       System.Object sentinel = GetSentinelObject();
+                       if (sentinel != null)
+                       {
+                               heap[1] = sentinel;
+                               for (int i = 2; i < heap.Length; i++)
+                               {
+                                       heap[i] = GetSentinelObject();
+                               }
+                               size = maxSize;
+                       }
+               }
+               
+		/// <summary> Adds an Object to a PriorityQueue in log(size) time. If one tries to add
+		/// more objects than the maxSize given to initialize, an
+		/// IndexOutOfRangeException is thrown.
+               /// 
+               /// </summary>
+               /// <deprecated> use {@link #Add(Object)} which returns the new top object,
+               /// saving an additional call to {@link #Top()}.
+               /// </deprecated>
+        [Obsolete("use Add(Object) which returns the new top object, saving an additional call to Top().")]
+               public void  Put(System.Object element)
+               {
+                       size++;
+                       heap[size] = element;
+                       UpHeap();
+               }
+               
+		/// <summary> Adds an Object to a PriorityQueue in log(size) time. If one tries to add
+		/// more objects than the maxSize given to initialize, an
+		/// IndexOutOfRangeException is thrown.
+               /// 
+               /// </summary>
+               /// <returns> the new 'top' element in the queue.
+               /// </returns>
+               public System.Object Add(System.Object element)
+               {
+                       size++;
+                       heap[size] = element;
+                       UpHeap();
+                       return heap[1];
+               }
+               
+               /// <summary> Adds element to the PriorityQueue in log(size) time if either the
+               /// PriorityQueue is not full, or not lessThan(element, top()).
+               /// 
+               /// </summary>
+               /// <param name="element">
+               /// </param>
+               /// <returns> true if element is added, false otherwise.
+               /// </returns>
+		/// <deprecated> use {@link #InsertWithOverflow(Object)} instead, which
+		/// encourages object reuse.
+		/// </deprecated>
+        [Obsolete("use InsertWithOverflow(Object) instead, which encourages object reuse.")]
+               public virtual bool Insert(System.Object element)
+               {
+                       return InsertWithOverflow(element) != element;
+               }
+               
+               /// <summary> insertWithOverflow() is the same as insert() except its
+               /// return value: it returns the object (if any) that was
+               /// dropped off the heap because it was full. This can be
+               /// the given parameter (in case it is smaller than the
+               /// full heap's minimum, and couldn't be added), or another
+               /// object that was previously the smallest value in the
+               /// heap and now has been replaced by a larger one, or null
+               /// if the queue wasn't yet full with maxSize elements.
+               /// </summary>
+               public virtual System.Object InsertWithOverflow(System.Object element)
+               {
+                       if (size < maxSize)
+                       {
+                               Put(element);
+                               return null;
+                       }
+                       else if (size > 0 && !LessThan(element, heap[1]))
+                       {
+                               System.Object ret = heap[1];
+                               heap[1] = element;
+                               AdjustTop();
+                               return ret;
+                       }
+                       else
+                       {
+                               return element;
+                       }
+               }
+               
+               /// <summary>Returns the least element of the PriorityQueue in constant time. </summary>
+               public System.Object Top()
+               {
+                       // We don't need to check size here: if maxSize is 0,
+                       // then heap is length 2 array with both entries null.
+                       // If size is 0 then heap[1] is already null.
+                       return heap[1];
+               }
+               
+               /// <summary>Removes and returns the least element of the PriorityQueue in log(size)
+               /// time. 
+               /// </summary>
+               public System.Object Pop()
+               {
+                       if (size > 0)
+                       {
+                               System.Object result = heap[1]; // save first value
+                               heap[1] = heap[size]; // move last to first
+                               heap[size] = null; // permit GC of objects
+                               size--;
+                               DownHeap(); // adjust heap
+                               return result;
+                       }
+                       else
+                               return null;
+               }
+               
+               /// <summary> Should be called when the Object at top changes values. Still log(n) worst
+               /// case, but it's at least twice as fast to
+               /// 
+               /// <pre>
+               /// pq.top().change();
+               /// pq.adjustTop();
+               /// </pre>
+               /// 
+               /// instead of
+               /// 
+               /// <pre>
+               /// o = pq.pop();
+               /// o.change();
+               /// pq.push(o);
+               /// </pre>
+               /// 
+               /// </summary>
+               /// <deprecated> use {@link #UpdateTop()} which returns the new top element and
+               /// saves an additional call to {@link #Top()}.
+               /// </deprecated>
+        [Obsolete("use UpdateTop() which returns the new top element and saves an additional call to Top()")]
+               public void  AdjustTop()
+               {
+                       DownHeap();
+               }
+               
+               /// <summary> Should be called when the Object at top changes values. Still log(n) worst
+               /// case, but it's at least twice as fast to
+               /// 
+               /// <pre>
+               /// pq.top().change();
+               /// pq.updateTop();
+               /// </pre>
+               /// 
+               /// instead of
+               /// 
+               /// <pre>
+               /// o = pq.pop();
+               /// o.change();
+               /// pq.push(o);
+               /// </pre>
+               /// 
+               /// </summary>
+               /// <returns> the new 'top' element.
+               /// </returns>
+               public System.Object UpdateTop()
+               {
+                       DownHeap();
+                       return heap[1];
+               }
+               
+               /// <summary>Returns the number of elements currently stored in the PriorityQueue. </summary>
+               public int Size()
+               {
+                       return size;
+               }
+               
+               /// <summary>Removes all entries from the PriorityQueue. </summary>
+               public void  Clear()
+               {
+                       for (int i = 0; i <= size; i++)
+                       {
+                               heap[i] = null;
+                       }
+                       size = 0;
+               }
+               
+               private void  UpHeap()
+               {
+                       int i = size;
+                       System.Object node = heap[i]; // save bottom node
+                       int j = SupportClass.Number.URShift(i, 1);
+                       while (j > 0 && LessThan(node, heap[j]))
+                       {
+                               heap[i] = heap[j]; // shift parents down
+                               i = j;
+                               j = SupportClass.Number.URShift(j, 1);
+                       }
+                       heap[i] = node; // install saved node
+               }
+               
+               private void  DownHeap()
+               {
+                       int i = 1;
+                       System.Object node = heap[i]; // save top node
+                       int j = i << 1; // find smaller child
+                       int k = j + 1;
+                       if (k <= size && LessThan(heap[k], heap[j]))
+                       {
+                               j = k;
+                       }
+                       while (j <= size && LessThan(heap[j], node))
+                       {
+                               heap[i] = heap[j]; // shift up child
+                               i = j;
+                               j = i << 1;
+                               k = j + 1;
+                               if (k <= size && LessThan(heap[k], heap[j]))
+                               {
+                                       j = k;
+                               }
+                       }
+                       heap[i] = node; // install saved node
+               }
+       }
+}
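
A minimal usage sketch (assumed, not part of this patch): keeping the three largest integers with the abstract PriorityQueue. LessThan() makes the smallest survivor sit at the top, so InsertWithOverflow() evicts it first; Initialize() must be called from the subclass constructor, as the docs above require.

    using System;
    using Mono.Lucene.Net.Util;

    class TopKInts : PriorityQueue
    {
        public TopKInts(int k)
        {
            Initialize(k); // subclass constructors must call this
        }

        public override bool LessThan(object a, object b)
        {
            return (int) a < (int) b; // least element kept at top
        }
    }

    class TopKDemo
    {
        static void Main()
        {
            TopKInts pq = new TopKInts(3);
            foreach (int n in new int[] { 5, 1, 9, 7, 3 })
                pq.InsertWithOverflow(n); // returns the evicted object, if any

            while (pq.Size() > 0)
                Console.WriteLine(pq.Pop()); // prints 5, 7, 9 (least first)
        }
    }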
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/RamUsageEstimator.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/RamUsageEstimator.cs
new file mode 100644 (file)
index 0000000..c32c6a0
--- /dev/null
@@ -0,0 +1,219 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> Estimates the size of a given Object using a given MemoryModel for primitive
+       /// size information.
+       /// 
+       /// Resource Usage: 
+       /// 
+	/// Internally uses a Map to temporarily hold a reference to every
+	/// object seen. 
+	/// 
+	/// If checkInterned, all Strings checked will be interned, but those
+	/// that were not already interned will be released for GC when the
+	/// estimate is complete.
+       /// </summary>
+       public sealed class RamUsageEstimator
+       {
+               private MemoryModel memoryModel;
+               
+               private System.Collections.IDictionary seen;
+               
+               private int refSize;
+               private int arraySize;
+               private int classSize;
+               
+               private bool checkInterned;
+               
+               /// <summary> Constructs this object with an AverageGuessMemoryModel and
+               /// checkInterned = true.
+               /// </summary>
+               public RamUsageEstimator():this(new AverageGuessMemoryModel())
+               {
+               }
+               
+		/// <param name="checkInterned">check if Strings are interned and don't add to size
+		/// if they are. Defaults to true, but if you know the objects you are checking
+		/// are unlikely to contain many interned Strings, it will be faster to turn off
+		/// intern checking.
+		/// </param>
+               public RamUsageEstimator(bool checkInterned):this(new AverageGuessMemoryModel(), checkInterned)
+               {
+               }
+               
+               /// <param name="memoryModel">MemoryModel to use for primitive object sizes.
+               /// </param>
+               public RamUsageEstimator(MemoryModel memoryModel):this(memoryModel, true)
+               {
+               }
+               
+               /// <param name="memoryModel">MemoryModel to use for primitive object sizes.
+               /// </param>
+		/// <param name="memoryModel">MemoryModel to use for primitive object sizes.
+		/// </param>
+		/// <param name="checkInterned">check if Strings are interned and don't add to size
+		/// if they are. Defaults to true, but if you know the objects you are checking
+		/// are unlikely to contain many interned Strings, it will be faster to turn off
+		/// intern checking.
+		/// </param>
+               public RamUsageEstimator(MemoryModel memoryModel, bool checkInterned)
+               {
+                       this.memoryModel = memoryModel;
+                       this.checkInterned = checkInterned;
+                       // Use Map rather than Set so that we can use an IdentityHashMap - not
+                       // seeing an IdentityHashSet
+            seen = new System.Collections.Hashtable(64);    // {{Aroush-2.9}} Port issue; need to mimic java's IdentityHashMap equals() through C#'s Equals()
+                       this.refSize = memoryModel.GetReferenceSize();
+                       this.arraySize = memoryModel.GetArraySize();
+                       this.classSize = memoryModel.GetClassSize();
+               }
+               
+               public long EstimateRamUsage(System.Object obj)
+               {
+                       long size = Size(obj);
+                       seen.Clear();
+                       return size;
+               }
+               
+               private long Size(System.Object obj)
+               {
+                       if (obj == null)
+                       {
+                               return 0;
+                       }
+                       // interned not part of this object
+                       if (checkInterned && obj is System.String && obj == (System.Object) String.Intern(((System.String) obj)))
+                       {
+                               // interned string will be eligible
+                               // for GC on
+                               // estimateRamUsage(Object) return
+                               return 0;
+                       }
+                       
+                       // skip if we have seen before
+                       if (seen.Contains(obj))
+                       {
+                               return 0;
+                       }
+                       
+                       // add to seen
+                       seen[obj] = null;
+                       
+                       System.Type clazz = obj.GetType();
+                       if (clazz.IsArray)
+                       {
+                               return SizeOfArray(obj);
+                       }
+                       
+                       long size = 0;
+                       
+                       // walk type hierarchy
+                       while (clazz != null)
+                       {
+                               System.Reflection.FieldInfo[] fields = clazz.GetFields(System.Reflection.BindingFlags.Instance | System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Public | System.Reflection.BindingFlags.DeclaredOnly | System.Reflection.BindingFlags.Static);
+                               for (int i = 0; i < fields.Length; i++)
+                               {
+                                       if (fields[i].IsStatic)
+                                       {
+                                               continue;
+                                       }
+                                       
+                                       if (fields[i].FieldType.IsPrimitive)
+                                       {
+                                               size += memoryModel.GetPrimitiveSize(fields[i].FieldType);
+                                       }
+                                       else
+                                       {
+						size += refSize;
+                                               try
+                                               {
+                                                       System.Object value_Renamed = fields[i].GetValue(obj);
+                                                       if (value_Renamed != null)
+                                                       {
+                                                               size += Size(value_Renamed);
+                                                       }
+                                               }
+						catch (System.UnauthorizedAccessException)
+						{
+							// field is not accessible via reflection; skip it
+						}
+                                       }
+                               }
+                               clazz = clazz.BaseType;
+                       }
+                       size += classSize;
+                       return size;
+               }
+               
+               private long SizeOfArray(System.Object obj)
+               {
+                       int len = ((System.Array) obj).Length;
+                       if (len == 0)
+                       {
+                               return 0;
+                       }
+                       long size = arraySize;
+                       System.Type arrayElementClazz = obj.GetType().GetElementType();
+                       if (arrayElementClazz.IsPrimitive)
+                       {
+                               size += len * memoryModel.GetPrimitiveSize(arrayElementClazz);
+                       }
+                       else
+                       {
+                               for (int i = 0; i < len; i++)
+                               {
+                                       size += refSize + Size(((System.Array) obj).GetValue(i));
+                               }
+                       }
+                       
+                       return size;
+               }
+               
+               private const long ONE_KB = 1024;
+               private static readonly long ONE_MB = ONE_KB * ONE_KB;
+               private static readonly long ONE_GB = ONE_KB * ONE_MB;
+               
+		/// <summary> Returns the byte count formatted with good default units (bytes, KB, MB or GB).</summary>
+               public static System.String HumanReadableUnits(long bytes, System.IFormatProvider df)
+               {
+                       System.String newSizeAndUnits;
+                       
+                       if (bytes / ONE_GB > 0)
+                       {
+                               newSizeAndUnits = System.Convert.ToString(((float) bytes / ONE_GB), df) + " GB";
+                       }
+                       else if (bytes / ONE_MB > 0)
+                       {
+                               newSizeAndUnits = System.Convert.ToString((float) bytes / ONE_MB, df) + " MB";
+                       }
+                       else if (bytes / ONE_KB > 0)
+                       {
+                               newSizeAndUnits = System.Convert.ToString((float) bytes / ONE_KB, df) + " KB";
+                       }
+                       else
+                       {
+                               newSizeAndUnits = System.Convert.ToString(bytes) + " bytes";
+                       }
+                       
+                       return newSizeAndUnits;
+               }
+       }
+}
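
A short usage sketch (assumed, not part of this patch): estimating the memory held by an object graph and formatting the result. Any IFormatProvider works for HumanReadableUnits(); InvariantCulture is used here purely for illustration.

    using System;
    using System.Globalization;
    using Mono.Lucene.Net.Util;

    class RamDemo
    {
        static void Main()
        {
            // Default constructor: AverageGuessMemoryModel with intern checking on.
            RamUsageEstimator estimator = new RamUsageEstimator();
            long bytes = estimator.EstimateRamUsage(new int[1024]);
            Console.WriteLine(RamUsageEstimator.HumanReadableUnits(bytes, CultureInfo.InvariantCulture));
        }
    }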
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/ReaderUtil.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/ReaderUtil.cs
new file mode 100644 (file)
index 0000000..cae51db
--- /dev/null
@@ -0,0 +1,128 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using IndexReader = Mono.Lucene.Net.Index.IndexReader;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> Common util methods for dealing with {@link IndexReader}s.
+       /// 
+       /// </summary>
+       public class ReaderUtil
+       {
+               
+               /// <summary> Gathers sub-readers from reader into a List.
+               /// 
+               /// </summary>
+               /// <param name="allSubReaders">
+               /// </param>
+               /// <param name="reader">
+               /// </param>
+               public static void  GatherSubReaders(System.Collections.IList allSubReaders, IndexReader reader)
+               {
+                       IndexReader[] subReaders = reader.GetSequentialSubReaders();
+                       if (subReaders == null)
+                       {
+                               // Add the reader itself, and do not recurse
+                               allSubReaders.Add(reader);
+                       }
+                       else
+                       {
+                               for (int i = 0; i < subReaders.Length; i++)
+                               {
+                                       GatherSubReaders(allSubReaders, subReaders[i]);
+                               }
+                       }
+               }
+               
+               /// <summary> Returns sub IndexReader that contains the given document id.
+               /// 
+               /// </summary>
+               /// <param name="doc">id of document
+               /// </param>
+               /// <param name="reader">parent reader
+               /// </param>
+               /// <returns> sub reader of parent which contains the specified doc id
+               /// </returns>
+               public static IndexReader SubReader(int doc, IndexReader reader)
+               {
+                       System.Collections.ArrayList subReadersList = new System.Collections.ArrayList();
+                       ReaderUtil.GatherSubReaders(subReadersList, reader);
+                       IndexReader[] subReaders = (IndexReader[]) subReadersList.ToArray(typeof(IndexReader));
+                       int[] docStarts = new int[subReaders.Length];
+                       int maxDoc = 0;
+                       for (int i = 0; i < subReaders.Length; i++)
+                       {
+                               docStarts[i] = maxDoc;
+                               maxDoc += subReaders[i].MaxDoc();
+                       }
+                       return subReaders[ReaderUtil.SubIndex(doc, docStarts)];
+               }
+               
+               /// <summary> Returns sub-reader subIndex from reader.
+               /// 
+               /// </summary>
+               /// <param name="reader">parent reader
+               /// </param>
+               /// <param name="subIndex">index of desired sub reader
+               /// </param>
+		/// <returns> the sub-reader at subIndex
+               /// </returns>
+               public static IndexReader SubReader(IndexReader reader, int subIndex)
+               {
+                       System.Collections.ArrayList subReadersList = new System.Collections.ArrayList();
+                       ReaderUtil.GatherSubReaders(subReadersList, reader);
+                       IndexReader[] subReaders = (IndexReader[]) subReadersList.ToArray(typeof(IndexReader));
+                       return subReaders[subIndex];
+               }
+               
+               
+               /// <summary> Returns index of the searcher/reader for document <code>n</code> in the
+               /// array used to construct this searcher/reader.
+               /// </summary>
+               public static int SubIndex(int n, int[] docStarts)
+               {
+			// Find the searcher/reader for doc n: search the starts array
+			// for the first element less than n and return its index.
+			int size = docStarts.Length;
+			int lo = 0;
+			int hi = size - 1;
+                       while (hi >= lo)
+                       {
+                               int mid = SupportClass.Number.URShift((lo + hi), 1);
+                               int midValue = docStarts[mid];
+                               if (n < midValue)
+                                       hi = mid - 1;
+                               else if (n > midValue)
+                                       lo = mid + 1;
+                               else
+                               {
+                                       // found a match
+                                       while (mid + 1 < size && docStarts[mid + 1] == midValue)
+                                       {
+                                               mid++; // scan to last match
+                                       }
+                                       return mid;
+                               }
+                       }
+                       return hi;
+               }
+       }
+}
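
A small sketch (assumed, not part of this patch) of the SubIndex() contract: given the cumulative docStarts array, it returns the index of the sub-reader whose document range contains n, i.e. the last start that is less than or equal to n.

    using System;
    using Mono.Lucene.Net.Util;

    class SubIndexDemo
    {
        static void Main()
        {
            // Three hypothetical sub-readers holding 10, 5 and 20 docs.
            int[] docStarts = { 0, 10, 15 };

            Console.WriteLine(ReaderUtil.SubIndex(0, docStarts));  // 0
            Console.WriteLine(ReaderUtil.SubIndex(9, docStarts));  // 0
            Console.WriteLine(ReaderUtil.SubIndex(10, docStarts)); // 1
            Console.WriteLine(ReaderUtil.SubIndex(17, docStarts)); // 2
        }
    }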
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/ScorerDocQueue.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/ScorerDocQueue.cs
new file mode 100644 (file)
index 0000000..ac035a0
--- /dev/null
@@ -0,0 +1,275 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/* Derived from Mono.Lucene.Net.Util.PriorityQueue of March 2005 */
+using System;
+
+using DocIdSetIterator = Mono.Lucene.Net.Search.DocIdSetIterator;
+using Scorer = Mono.Lucene.Net.Search.Scorer;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+	/// <summary>A ScorerDocQueue maintains a partial ordering of its Scorers such that the
+	/// least Scorer can always be found in constant time. Put() and Pop()
+	/// require log(size) time. The ordering is by Scorer.DocID().
+	/// </summary>
+       public class ScorerDocQueue
+       {
+               // later: SpansQueue for spans with doc and term positions
+               private HeapedScorerDoc[] heap;
+               private int maxSize;
+               private int size;
+               
+               private class HeapedScorerDoc
+               {
+                       private void  InitBlock(ScorerDocQueue enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private ScorerDocQueue enclosingInstance;
+                       public ScorerDocQueue Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal Scorer scorer;
+                       internal int doc;
+                       
+                       internal HeapedScorerDoc(ScorerDocQueue enclosingInstance, Scorer s):this(enclosingInstance, s, s.DocID())
+                       {
+                       }
+                       
+                       internal HeapedScorerDoc(ScorerDocQueue enclosingInstance, Scorer scorer, int doc)
+                       {
+                               InitBlock(enclosingInstance);
+                               this.scorer = scorer;
+                               this.doc = doc;
+                       }
+                       
+                       internal virtual void  Adjust()
+                       {
+                               doc = scorer.DocID();
+                       }
+               }
+               
+               private HeapedScorerDoc topHSD; // same as heap[1], only for speed
+               
+               /// <summary>Create a ScorerDocQueue with a maximum size. </summary>
+               public ScorerDocQueue(int maxSize)
+               {
+                       // assert maxSize >= 0;
+                       size = 0;
+                       int heapSize = maxSize + 1;
+                       heap = new HeapedScorerDoc[heapSize];
+                       this.maxSize = maxSize;
+                       topHSD = heap[1]; // initially null
+               }
+               
+               /// <summary> Adds a Scorer to a ScorerDocQueue in log(size) time.
+		/// If one tries to add more Scorers than maxSize,
+		/// an IndexOutOfRangeException is thrown.
+               /// </summary>
+               public void  Put(Scorer scorer)
+               {
+                       size++;
+                       heap[size] = new HeapedScorerDoc(this, scorer);
+                       UpHeap();
+               }
+               
+               /// <summary> Adds a Scorer to the ScorerDocQueue in log(size) time if either
+               /// the ScorerDocQueue is not full, or not lessThan(scorer, top()).
+               /// </summary>
+               /// <param name="scorer">
+               /// </param>
+               /// <returns> true if scorer is added, false otherwise.
+               /// </returns>
+               public virtual bool Insert(Scorer scorer)
+               {
+                       if (size < maxSize)
+                       {
+                               Put(scorer);
+                               return true;
+                       }
+                       else
+                       {
+                               int docNr = scorer.DocID();
+                               if ((size > 0) && (!(docNr < topHSD.doc)))
+                               {
+                                       // heap[1] is top()
+                                       heap[1] = new HeapedScorerDoc(this, scorer, docNr);
+                                       DownHeap();
+                                       return true;
+                               }
+                               else
+                               {
+                                       return false;
+                               }
+                       }
+               }
+               
+               /// <summary>Returns the least Scorer of the ScorerDocQueue in constant time.
+               /// Should not be used when the queue is empty.
+               /// </summary>
+               public Scorer Top()
+               {
+                       // assert size > 0;
+                       return topHSD.scorer;
+               }
+               
+               /// <summary>Returns document number of the least Scorer of the ScorerDocQueue
+               /// in constant time.
+               /// Should not be used when the queue is empty.
+               /// </summary>
+               public int TopDoc()
+               {
+                       // assert size > 0;
+                       return topHSD.doc;
+               }
+               
+               public float TopScore()
+               {
+                       // assert size > 0;
+                       return topHSD.scorer.Score();
+               }
+               
+               public bool TopNextAndAdjustElsePop()
+               {
+                       return CheckAdjustElsePop(topHSD.scorer.NextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+               }
+               
+               public bool TopSkipToAndAdjustElsePop(int target)
+               {
+                       return CheckAdjustElsePop(topHSD.scorer.Advance(target) != DocIdSetIterator.NO_MORE_DOCS);
+               }
+               
+               private bool CheckAdjustElsePop(bool cond)
+               {
+                       if (cond)
+                       {
+                               // see also adjustTop
+                               topHSD.doc = topHSD.scorer.DocID();
+                       }
+                       else
+                       {
+                               // see also popNoResult
+                               heap[1] = heap[size]; // move last to first
+                               heap[size] = null;
+                               size--;
+                       }
+                       DownHeap();
+                       return cond;
+               }
+               
+               /// <summary>Removes and returns the least scorer of the ScorerDocQueue in log(size)
+               /// time.
+               /// Should not be used when the queue is empty.
+               /// </summary>
+               public Scorer Pop()
+               {
+                       // assert size > 0;
+                       Scorer result = topHSD.scorer;
+                       PopNoResult();
+                       return result;
+               }
+               
+               /// <summary>Removes the least scorer of the ScorerDocQueue in log(size) time.
+               /// Should not be used when the queue is empty.
+               /// </summary>
+               private void  PopNoResult()
+               {
+                       heap[1] = heap[size]; // move last to first
+                       heap[size] = null;
+                       size--;
+                       DownHeap(); // adjust heap
+               }
+               
+               /// <summary>Should be called when the scorer at top changes doc() value.
+               /// Still log(n) worst case, but it's at least twice as fast to <pre>
+               /// { pq.top().change(); pq.adjustTop(); }
+               /// </pre> instead of <pre>
+               /// { o = pq.pop(); o.change(); pq.push(o); }
+               /// </pre>
+               /// </summary>
+               public void  AdjustTop()
+               {
+                       // assert size > 0;
+                       topHSD.Adjust();
+                       DownHeap();
+               }
+               
+               /// <summary>Returns the number of scorers currently stored in the ScorerDocQueue. </summary>
+               public int Size()
+               {
+                       return size;
+               }
+               
+               /// <summary>Removes all entries from the ScorerDocQueue. </summary>
+               public void  Clear()
+               {
+                       for (int i = 0; i <= size; i++)
+                       {
+                               heap[i] = null;
+                       }
+                       size = 0;
+               }
+               
+               private void  UpHeap()
+               {
+                       int i = size;
+                       HeapedScorerDoc node = heap[i]; // save bottom node
+                       int j = SupportClass.Number.URShift(i, 1);
+                       while ((j > 0) && (node.doc < heap[j].doc))
+                       {
+                               heap[i] = heap[j]; // shift parents down
+                               i = j;
+                               j = SupportClass.Number.URShift(j, 1);
+                       }
+                       heap[i] = node; // install saved node
+                       topHSD = heap[1];
+               }
+               
+               private void  DownHeap()
+               {
+                       int i = 1;
+                       HeapedScorerDoc node = heap[i]; // save top node
+                       int j = i << 1; // find smaller child
+                       int k = j + 1;
+                       if ((k <= size) && (heap[k].doc < heap[j].doc))
+                       {
+                               j = k;
+                       }
+                       while ((j <= size) && (heap[j].doc < node.doc))
+                       {
+                               heap[i] = heap[j]; // shift up child
+                               i = j;
+                               j = i << 1;
+                               k = j + 1;
+                               if (k <= size && (heap[k].doc < heap[j].doc))
+                               {
+                                       j = k;
+                               }
+                       }
+                       heap[i] = node; // install saved node
+                       topHSD = heap[1];
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/SimpleStringInterner.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/SimpleStringInterner.cs
new file mode 100644 (file)
index 0000000..95aafef
--- /dev/null
@@ -0,0 +1,95 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       
+       /// <summary> Simple lockless and memory barrier free String intern cache that is guaranteed
+       /// to return the same String instance as String.intern() does.
+       /// </summary>
+       public class SimpleStringInterner:StringInterner
+       {
+               
+               internal /*private*/ class Entry
+               {
+                       internal /*private*/ System.String str;
+                       internal /*private*/ int hash;
+                       internal /*private*/ Entry next;
+                       internal Entry(System.String str, int hash, Entry next)
+                       {
+                               this.str = str;
+                               this.hash = hash;
+                               this.next = next;
+                       }
+               }
+               
+               private Entry[] cache;
+               private int maxChainLength;
+               
+               /// <param name="tableSize"> Size of the hash table, should be a power of two.
+               /// </param>
+               /// <param name="maxChainLength"> Maximum length of each bucket, after which the oldest item inserted is dropped.
+               /// </param>
+               public SimpleStringInterner(int tableSize, int maxChainLength)
+               {
+                       cache = new Entry[System.Math.Max(1, BitUtil.NextHighestPowerOfTwo(tableSize))];
+                       this.maxChainLength = System.Math.Max(2, maxChainLength);
+               }
+               
+               // @Override
+               public override System.String Intern(System.String s)
+               {
+                       int h = s.GetHashCode();
+                       // In the future, it may be worth augmenting the string hash
+                       // if the lower bits need better distribution.
+                       int slot = h & (cache.Length - 1);
+                       
+                       Entry first = this.cache[slot];
+                       Entry nextToLast = null;
+                       
+                       int chainLength = 0;
+                       
+                       for (Entry e = first; e != null; e = e.next)
+                       {
+                               if (e.hash == h && ((System.Object) e.str == (System.Object) s || String.CompareOrdinal(e.str, s) == 0))
+                               {
+                                       // if (e.str == s || (e.hash == h && e.str.compareTo(s)==0)) {
+                                       return e.str;
+                               }
+                               
+                               chainLength++;
+                               if (e.next != null)
+                               {
+                                       nextToLast = e;
+                               }
+                       }
+                       
+                       // insertion-order cache: add new entry at head
+                       s = String.Intern(s);
+                       this.cache[slot] = new Entry(s, h, first);
+                       if (chainLength >= maxChainLength)
+                       {
+                               // prune last entry
+                               nextToLast.next = null;
+                       }
+                       return s;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/SmallFloat.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/SmallFloat.cs
new file mode 100644
index 0000000..81cd98e
--- /dev/null
@@ -0,0 +1,159 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       
+       /// <summary>Floating point numbers smaller than 32 bits.
+       /// 
+       /// </summary>
+       /// <version>  $Id$
+       /// </version>
+       public class SmallFloat
+       {
+               
+               /// <summary>Converts a 32 bit float to an 8 bit float.
+               /// <br/>Values less than zero are all mapped to zero.
+               /// <br/>Values are truncated (rounded down) to the nearest 8 bit value.
+               /// <br/>Values between zero and the smallest representable value
+               /// are rounded up.
+               /// 
+               /// </summary>
+               /// <param name="f">the 32 bit float to be converted to an 8 bit float (byte)
+               /// </param>
+               /// <param name="numMantissaBits">the number of mantissa bits to use in the byte, with the remainder to be used in the exponent
+               /// </param>
+               /// <param name="zeroExp">the zero-point in the range of exponent values
+               /// </param>
+               /// <returns> the 8 bit float representation
+               /// </returns>
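+               /// <remarks> Hypothetical illustration: with numMantissaBits = 3 and
+               /// zeroExp = 15 this matches FloatToByte315 below, e.g.
+               /// FloatToByte(1.0f, 3, 15) encodes 1.0f as the byte 124.
+               /// </remarks>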
+               public static sbyte FloatToByte(float f, int numMantissaBits, int zeroExp)
+               {
+                       // Adjustment from a float zero exponent to our zero exponent,
+                       // shifted over to our exponent position.
+                       int fzero = (63 - zeroExp) << numMantissaBits;
+                       int bits = System.BitConverter.ToInt32(System.BitConverter.GetBytes(f), 0);
+                       int smallfloat = bits >> (24 - numMantissaBits);
+                       if (smallfloat < fzero)
+                       {
+                               return (bits <= 0)?(sbyte) 0:(sbyte) 1; // underflow is mapped to smallest non-zero number.
+                       }
+                       else if (smallfloat >= fzero + 0x100)
+                       {
+                               return - 1; // overflow maps to largest number
+                       }
+                       else
+                       {
+                               return (sbyte) (smallfloat - fzero);
+                       }
+               }
+               
+               /// <summary>Converts an 8 bit float to a 32 bit float. </summary>
+               public static float ByteToFloat(byte b, int numMantissaBits, int zeroExp)
+               {
+                       // on Java1.5 & 1.6 JVMs, prebuilding a decoding array and doing a lookup
+                       // is only a little bit faster (anywhere from 0% to 7%)
+                       if (b == 0)
+                               return 0.0f;
+                       int bits = (b & 0xff) << (24 - numMantissaBits);
+                       bits += ((63 - zeroExp) << 24);
+                       return BitConverter.ToSingle(BitConverter.GetBytes(bits), 0);
+               }
+               
+               
+               //
+               // Some specializations of the generic functions follow.
+               // The generic functions are just as fast with current (1.5)
+               // -server JVMs, but still slower with client JVMs.
+               //
+               
+               /// <summary>floatToByte(b, mantissaBits=3, zeroExponent=15)
+               /// <br/>smallest non-zero value = 5.820766E-10
+               /// <br/>largest value = 7.5161928E9
+               /// <br/>epsilon = 0.125
+               /// </summary>
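+               /// <remarks> Hypothetical example: FloatToByte315(1.0f) returns 124, and
+               /// Byte315ToFloat((byte) 124) recovers exactly 1.0f.
+               /// </remarks>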
+               public static sbyte FloatToByte315(float f)
+               {
+                       int bits = System.BitConverter.ToInt32(System.BitConverter.GetBytes(f), 0);
+                       int smallfloat = bits >> (24 - 3);
+                       if (smallfloat < (63 - 15) << 3)
+                       {
+                               return (bits <= 0)?(sbyte) 0:(sbyte) 1;
+                       }
+                       if (smallfloat >= ((63 - 15) << 3) + 0x100)
+                       {
+                               return - 1;
+                       }
+                       return (sbyte) (smallfloat - ((63 - 15) << 3));
+               }
+               
+               /// <summary>byteToFloat(b, mantissaBits=3, zeroExponent=15) </summary>
+               public static float Byte315ToFloat(byte b)
+               {
+                       // on Java1.5 & 1.6 JVMs, prebuilding a decoding array and doing a lookup
+                       // is only a little bit faster (anywhere from 0% to 7%)
+                       if (b == 0)
+                               return 0.0f;
+                       int bits = (b & 0xff) << (24 - 3);
+                       bits += ((63 - 15) << 24);
+                       return BitConverter.ToSingle(BitConverter.GetBytes(bits), 0);
+               }
+               
+               
+               /// <summary>floatToByte(b, mantissaBits=5, zeroExponent=2)
+               /// <br/>smallest nonzero value = 0.033203125
+               /// <br/>largest value = 1984.0
+               /// <br/>epsilon = 0.03125
+               /// </summary>
+               public static sbyte FloatToByte52(float f)
+               {
+                       int bits = System.BitConverter.ToInt32(System.BitConverter.GetBytes(f), 0);
+                       int smallfloat = bits >> (24 - 5);
+                       if (smallfloat < (63 - 2) << 5)
+                       {
+                               return (bits <= 0)?(sbyte) 0:(sbyte) 1;
+                       }
+                       if (smallfloat >= ((63 - 2) << 5) + 0x100)
+                       {
+                               return - 1;
+                       }
+                       return (sbyte) (smallfloat - ((63 - 2) << 5));
+               }
+               
+               /// <summary>byteToFloat(b, mantissaBits=5, zeroExponent=2) </summary>
+               public static float Byte52ToFloat(byte b)
+               {
+                       // on Java1.5 & 1.6 JVMs, prebuilding a decoding array and doing a lookup
+                       // is only a little bit faster (anywhere from 0% to 7%)
+                       if (b == 0)
+                               return 0.0f;
+                       int bits = (b & 0xff) << (24 - 5);
+                       bits += ((63 - 2) << 24);
+                       return BitConverter.ToSingle(BitConverter.GetBytes(bits), 0);
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/SortedVIntList.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/SortedVIntList.cs
new file mode 100644
index 0000000..ced8246
--- /dev/null
@@ -0,0 +1,327 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+using DocIdSet = Mono.Lucene.Net.Search.DocIdSet;
+using DocIdSetIterator = Mono.Lucene.Net.Search.DocIdSetIterator;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> Stores and iterates over sorted integers in compressed form in RAM. <br/>
+       /// The code for compressing the differences between ascending integers was
+       /// borrowed from {@link Mono.Lucene.Net.Store.IndexInput} and
+       /// {@link Mono.Lucene.Net.Store.IndexOutput}.
+       /// <p/>
+       /// <b>NOTE:</b> this class assumes the stored integers are doc Ids (which is why
+       /// it extends {@link DocIdSet}). Therefore its {@link #Iterator()} assumes {@link
+       /// DocIdSetIterator#NO_MORE_DOCS} can be used as a sentinel. If you intend to use
+       /// this value, then make sure it's not used during search flow.
+       /// </summary>
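+       /// <example> A hypothetical usage sketch (not part of the original source):
+       /// <code>
+       /// SortedVIntList list = new SortedVIntList(new int[] { 3, 5, 7, 12 });
+       /// DocIdSetIterator it = list.Iterator();
+       /// int doc;
+       /// while ((doc = it.NextDoc()) != DocIdSetIterator.NO_MORE_DOCS)
+       ///     System.Console.WriteLine(doc); // prints 3, 5, 7 and 12
+       /// </code>
+       /// </example>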
+       public class SortedVIntList:DocIdSet
+       {
+               private class AnonymousClassDocIdSetIterator:DocIdSetIterator
+               {
+                       public AnonymousClassDocIdSetIterator(SortedVIntList enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                       }
+                       private void  InitBlock(SortedVIntList enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private SortedVIntList enclosingInstance;
+                       public SortedVIntList Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       internal int bytePos = 0;
+                       internal int lastInt = 0;
+                       internal int doc = - 1;
+                       
+                       private void  Advance()
+                       {
+                               // See Mono.Lucene.Net.Store.IndexInput.readVInt()
+                               sbyte b = Enclosing_Instance.bytes[bytePos++];
+                               lastInt += (b & Mono.Lucene.Net.Util.SortedVIntList.VB1);
+                               for (int s = Mono.Lucene.Net.Util.SortedVIntList.BIT_SHIFT; (b & ~ Mono.Lucene.Net.Util.SortedVIntList.VB1) != 0; s += Mono.Lucene.Net.Util.SortedVIntList.BIT_SHIFT)
+                               {
+                                       b = Enclosing_Instance.bytes[bytePos++];
+                                       lastInt += ((b & Mono.Lucene.Net.Util.SortedVIntList.VB1) << s);
+                               }
+                       }
+                       
+                       /// <deprecated> use {@link #DocID()} instead. 
+                       /// </deprecated>
+            [Obsolete("use DocID() instead.")]
+                       public override int Doc()
+                       {
+                               return lastInt;
+                       }
+                       
+                       public override int DocID()
+                       {
+                               return doc;
+                       }
+                       
+                       /// <deprecated> use {@link #NextDoc()} instead. 
+                       /// </deprecated>
+            [Obsolete("use NextDoc() instead.")]
+                       public override bool Next()
+                       {
+                               return NextDoc() != NO_MORE_DOCS;
+                       }
+                       
+                       public override int NextDoc()
+                       {
+                               if (bytePos >= Enclosing_Instance.lastBytePos)
+                               {
+                                       doc = NO_MORE_DOCS;
+                               }
+                               else
+                               {
+                                       Advance();
+                                       doc = lastInt;
+                               }
+                               return doc;
+                       }
+                       
+                       /// <deprecated> use {@link #Advance(int)} instead. 
+                       /// </deprecated>
+            [Obsolete("use Advance(int) instead.")]
+                       public override bool SkipTo(int docNr)
+                       {
+                               return Advance(docNr) != NO_MORE_DOCS;
+                       }
+                       
+                       public override int Advance(int target)
+                       {
+                               while (bytePos < Enclosing_Instance.lastBytePos)
+                               {
+                                       Advance();
+                                       if (lastInt >= target)
+                                       {
+                                               return doc = lastInt;
+                                       }
+                               }
+                               return doc = NO_MORE_DOCS;
+                       }
+               }
+               /// <summary>When fewer than 1 in BITS2VINTLIST_SIZE of the bits of a BitSet
+               /// are set, a SortedVIntList representing the index numbers of the set bits
+               /// will be smaller than that BitSet.
+               /// </summary>
+               internal const int BITS2VINTLIST_SIZE = 8;
+               
+               private int size;
+               private sbyte[] bytes;
+               private int lastBytePos;
+               
+               /// <summary>  Create a SortedVIntList from all elements of an array of integers.
+               /// 
+               /// </summary>
+               /// <param name="sortedInts"> A sorted array of non negative integers.
+               /// </param>
+               public SortedVIntList(int[] sortedInts):this(sortedInts, sortedInts.Length)
+               {
+               }
+               
+               /// <summary> Create a SortedVIntList from an array of integers.</summary>
+               /// <param name="sortedInts"> An array of sorted non negative integers.
+               /// </param>
+               /// <param name="inputSize">  The number of integers to be used from the array.
+               /// </param>
+               public SortedVIntList(int[] sortedInts, int inputSize)
+               {
+                       SortedVIntListBuilder builder = new SortedVIntListBuilder(this);
+                       for (int i = 0; i < inputSize; i++)
+                       {
+                               builder.AddInt(sortedInts[i]);
+                       }
+                       builder.Done();
+               }
+               
+               /// <summary> Create a SortedVIntList from a BitSet.</summary>
+               /// <param name="bits"> A bit set representing a set of integers.
+               /// </param>
+               public SortedVIntList(System.Collections.BitArray bits)
+               {
+                       SortedVIntListBuilder builder = new SortedVIntListBuilder(this);
+                       int nextInt = SupportClass.BitSetSupport.NextSetBit(bits, 0);
+                       while (nextInt != - 1)
+                       {
+                               builder.AddInt(nextInt);
+                               nextInt = SupportClass.BitSetSupport.NextSetBit(bits, nextInt + 1);
+                       }
+                       builder.Done();
+               }
+               
+               /// <summary> Create a SortedVIntList from an OpenBitSet.</summary>
+               /// <param name="bits"> A bit set representing a set of integers.
+               /// </param>
+               public SortedVIntList(OpenBitSet bits)
+               {
+                       SortedVIntListBuilder builder = new SortedVIntListBuilder(this);
+                       int nextInt = bits.NextSetBit(0);
+                       while (nextInt != - 1)
+                       {
+                               builder.AddInt(nextInt);
+                               nextInt = bits.NextSetBit(nextInt + 1);
+                       }
+                       builder.Done();
+               }
+               
+               /// <summary> Create a SortedVIntList.</summary>
+               /// <param name="docIdSetIterator"> An iterator providing document numbers as a set of integers.
+               /// This DocIdSetIterator is iterated completely when this constructor
+               /// is called and it must provide the integers in
+               /// non-decreasing order.
+               /// </param>
+               public SortedVIntList(DocIdSetIterator docIdSetIterator)
+               {
+                       SortedVIntListBuilder builder = new SortedVIntListBuilder(this);
+                       int doc;
+                       while ((doc = docIdSetIterator.NextDoc()) != DocIdSetIterator.NO_MORE_DOCS)
+                       {
+                               builder.AddInt(doc);
+                       }
+                       builder.Done();
+               }
+               
+               
+               private class SortedVIntListBuilder
+               {
+                       private void  InitBlock(SortedVIntList enclosingInstance)
+                       {
+                               this.enclosingInstance = enclosingInstance;
+                       }
+                       private SortedVIntList enclosingInstance;
+                       public SortedVIntList Enclosing_Instance
+                       {
+                               get
+                               {
+                                       return enclosingInstance;
+                               }
+                               
+                       }
+                       private int lastInt = 0;
+                       
+                       internal SortedVIntListBuilder(SortedVIntList enclosingInstance)
+                       {
+                               InitBlock(enclosingInstance);
+                               Enclosing_Instance.InitBytes();
+                               lastInt = 0;
+                       }
+                       
+                       internal virtual void  AddInt(int nextInt)
+                       {
+                               int diff = nextInt - lastInt;
+                               if (diff < 0)
+                               {
+                                       throw new System.ArgumentException("Input not sorted or first element negative.");
+                               }
+                               
+                               if ((Enclosing_Instance.lastBytePos + Enclosing_Instance.MAX_BYTES_PER_INT) > Enclosing_Instance.bytes.Length)
+                               {
+                                       // biggest possible int does not fit
+                                       Enclosing_Instance.ResizeBytes((Enclosing_Instance.bytes.Length * 2) + Enclosing_Instance.MAX_BYTES_PER_INT);
+                               }
+                               
+                               // See Mono.Lucene.Net.Store.IndexOutput.writeVInt()
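+                               // Hypothetical example: a delta of 300 (0x12C) is written as the two
+                               // bytes 0xAC 0x02 -- low seven bits first, high bit flagging continuation.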
+                               while ((diff & ~ Mono.Lucene.Net.Util.SortedVIntList.VB1) != 0)
+                               {
+                                       // The high bit of the next byte needs to be set.
+                                       Enclosing_Instance.bytes[Enclosing_Instance.lastBytePos++] = (sbyte) ((diff & Mono.Lucene.Net.Util.SortedVIntList.VB1) | ~ Mono.Lucene.Net.Util.SortedVIntList.VB1);
+                                       diff = SupportClass.Number.URShift(diff, Mono.Lucene.Net.Util.SortedVIntList.BIT_SHIFT);
+                               }
+                               Enclosing_Instance.bytes[Enclosing_Instance.lastBytePos++] = (sbyte) diff; // Last byte, high bit not set.
+                               Enclosing_Instance.size++;
+                               lastInt = nextInt;
+                       }
+                       
+                       internal virtual void  Done()
+                       {
+                               Enclosing_Instance.ResizeBytes(Enclosing_Instance.lastBytePos);
+                       }
+               }
+               
+               
+               private void  InitBytes()
+               {
+                       size = 0;
+                       bytes = new sbyte[128]; // initial byte size
+                       lastBytePos = 0;
+               }
+               
+               private void  ResizeBytes(int newSize)
+               {
+                       if (newSize != bytes.Length)
+                       {
+                               sbyte[] newBytes = new sbyte[newSize];
+                               Array.Copy(bytes, 0, newBytes, 0, lastBytePos);
+                               bytes = newBytes;
+                       }
+               }
+               
+               private const int VB1 = 0x7F;
+               private const int BIT_SHIFT = 7;
+               private int MAX_BYTES_PER_INT = (31 / BIT_SHIFT) + 1; // 5 bytes suffice for any non-negative int
+               
+               /// <returns>    The total number of sorted integers.
+               /// </returns>
+               public virtual int Size()
+               {
+                       return size;
+               }
+               
+               /// <returns> The size of the byte array storing the compressed sorted integers.
+               /// </returns>
+               public virtual int GetByteSize()
+               {
+                       return bytes.Length;
+               }
+               
+               /// <summary>This DocIdSet implementation is cacheable. </summary>
+               public override bool IsCacheable()
+               {
+                       return true;
+               }
+               
+               /// <returns>    An iterator over the sorted integers.
+               /// </returns>
+               public override DocIdSetIterator Iterator()
+               {
+                       return new AnonymousClassDocIdSetIterator(this);
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/SorterTemplate.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/SorterTemplate.cs
new file mode 100644
index 0000000..d2c474d
--- /dev/null
@@ -0,0 +1,249 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> Borrowed from Cglib. Allows a custom swap implementation so that two
+       /// parallel arrays can be sorted at the same time.
+       /// </summary>
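+       /// <example> A hypothetical subclass sorting two parallel arrays together
+       /// (not part of the original source):
+       /// <code>
+       /// class PairSorter : SorterTemplate
+       /// {
+       ///     private int[] keys;
+       ///     private string[] values;
+       ///     public PairSorter(int[] keys, string[] values)
+       ///     {
+       ///         this.keys = keys;
+       ///         this.values = values;
+       ///     }
+       ///     protected internal override void Swap(int i, int j)
+       ///     {
+       ///         int k = keys[i]; keys[i] = keys[j]; keys[j] = k;
+       ///         string v = values[i]; values[i] = values[j]; values[j] = v;
+       ///     }
+       ///     protected internal override int Compare(int i, int j)
+       ///     {
+       ///         return keys[i].CompareTo(keys[j]);
+       ///     }
+       /// }
+       /// // new PairSorter(keys, values).QuickSort(0, keys.Length - 1);
+       /// </code>
+       /// </example>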
+       public abstract class SorterTemplate
+       {
+               private const int MERGESORT_THRESHOLD = 12;
+               private const int QUICKSORT_THRESHOLD = 7;
+               
+               abstract protected internal void  Swap(int i, int j);
+               abstract protected internal int Compare(int i, int j);
+               
+               public virtual void  QuickSort(int lo, int hi)
+               {
+                       QuickSortHelper(lo, hi);
+                       InsertionSort(lo, hi);
+               }
+               
+               private void  QuickSortHelper(int lo, int hi)
+               {
+                       for (; ; )
+                       {
+                               int diff = hi - lo;
+                               if (diff <= QUICKSORT_THRESHOLD)
+                               {
+                                       break;
+                               }
+                               int i = (hi + lo) / 2;
+                               if (Compare(lo, i) > 0)
+                               {
+                                       Swap(lo, i);
+                               }
+                               if (Compare(lo, hi) > 0)
+                               {
+                                       Swap(lo, hi);
+                               }
+                               if (Compare(i, hi) > 0)
+                               {
+                                       Swap(i, hi);
+                               }
+                               int j = hi - 1;
+                               Swap(i, j);
+                               i = lo;
+                               int v = j;
+                               for (; ; )
+                               {
+                                       while (Compare(++i, v) < 0)
+                                       {
+                                               /* nothing */ ;
+                                       }
+                                       while (Compare(--j, v) > 0)
+                                       {
+                                               /* nothing */ ;
+                                       }
+                                       if (j < i)
+                                       {
+                                               break;
+                                       }
+                                       Swap(i, j);
+                               }
+                               Swap(i, hi - 1);
+                               if (j - lo <= hi - i + 1)
+                               {
+                                       QuickSortHelper(lo, j);
+                                       lo = i + 1;
+                               }
+                               else
+                               {
+                                       QuickSortHelper(i + 1, hi);
+                                       hi = j;
+                               }
+                       }
+               }
+               
+               private void  InsertionSort(int lo, int hi)
+               {
+                       for (int i = lo + 1; i <= hi; i++)
+                       {
+                               for (int j = i; j > lo; j--)
+                               {
+                                       if (Compare(j - 1, j) > 0)
+                                       {
+                                               Swap(j - 1, j);
+                                       }
+                                       else
+                                       {
+                                               break;
+                                       }
+                               }
+                       }
+               }
+               
+               protected internal virtual void  MergeSort(int lo, int hi)
+               {
+                       int diff = hi - lo;
+                       if (diff <= MERGESORT_THRESHOLD)
+                       {
+                               InsertionSort(lo, hi);
+                               return ;
+                       }
+                       int mid = lo + diff / 2;
+                       MergeSort(lo, mid);
+                       MergeSort(mid, hi);
+                       Merge(lo, mid, hi, mid - lo, hi - mid);
+               }
+               
+               private void  Merge(int lo, int pivot, int hi, int len1, int len2)
+               {
+                       if (len1 == 0 || len2 == 0)
+                       {
+                               return ;
+                       }
+                       if (len1 + len2 == 2)
+                       {
+                               if (Compare(pivot, lo) < 0)
+                               {
+                                       Swap(pivot, lo);
+                               }
+                               return ;
+                       }
+                       int first_cut, second_cut;
+                       int len11, len22;
+                       if (len1 > len2)
+                       {
+                               len11 = len1 / 2;
+                               first_cut = lo + len11;
+                               second_cut = Lower(pivot, hi, first_cut);
+                               len22 = second_cut - pivot;
+                       }
+                       else
+                       {
+                               len22 = len2 / 2;
+                               second_cut = pivot + len22;
+                               first_cut = Upper(lo, pivot, second_cut);
+                               len11 = first_cut - lo;
+                       }
+                       Rotate(first_cut, pivot, second_cut);
+                       int new_mid = first_cut + len22;
+                       Merge(lo, first_cut, new_mid, len11, len22);
+                       Merge(new_mid, second_cut, hi, len1 - len11, len2 - len22);
+               }
+               
+               private void  Rotate(int lo, int mid, int hi)
+               {
+                       int lot = lo;
+                       int hit = mid - 1;
+                       while (lot < hit)
+                       {
+                               Swap(lot++, hit--);
+                       }
+                       lot = mid; hit = hi - 1;
+                       while (lot < hit)
+                       {
+                               Swap(lot++, hit--);
+                       }
+                       lot = lo; hit = hi - 1;
+                       while (lot < hit)
+                       {
+                               Swap(lot++, hit--);
+                       }
+               }
+               
+               private int Lower(int lo, int hi, int val)
+               {
+                       int len = hi - lo;
+                       while (len > 0)
+                       {
+                               int half = len / 2;
+                               int mid = lo + half;
+                               if (Compare(mid, val) < 0)
+                               {
+                                       lo = mid + 1;
+                                       len = len - half - 1;
+                               }
+                               else
+                               {
+                                       len = half;
+                               }
+                       }
+                       return lo;
+               }
+               
+               private int Upper(int lo, int hi, int val)
+               {
+                       int len = hi - lo;
+                       while (len > 0)
+                       {
+                               int half = len / 2;
+                               int mid = lo + half;
+                               if (Compare(val, mid) < 0)
+                               {
+                                       len = half;
+                               }
+                               else
+                               {
+                                       lo = mid + 1;
+                                       len = len - half - 1;
+                               }
+                       }
+                       return lo;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/StringHelper.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/StringHelper.cs
new file mode 100644
index 0000000..65a1d73
--- /dev/null
@@ -0,0 +1,97 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       
+       /// <summary> Methods for manipulating strings.
+       /// 
+       /// $Id: StringHelper.java 801344 2009-08-05 18:05:06Z yonik $
+       /// </summary>
+       public abstract class StringHelper
+       {
+               /// <summary> Expert:
+               /// The StringInterner implementation used by Lucene.
+               /// This shouldn't be changed to an incompatible implementation after other Lucene APIs have been used.
+               /// </summary>
+               public static StringInterner interner = new SimpleStringInterner(1024, 8);
+               
+               /// <summary>Return the same string object for all equal strings </summary>
+               public static System.String Intern(System.String s)
+               {
+                       return interner.Intern(s);
+               }
+               
+               /// <summary> Compares two byte[] arrays, element by element, and returns the
+               /// length of their common prefix.
+               /// 
+               /// </summary>
+               /// <param name="bytes1">The first byte[] to compare
+               /// </param>
+               /// <param name="len1">The number of bytes to use from bytes1
+               /// </param>
+               /// <param name="bytes2">The second byte[] to compare
+               /// </param>
+               /// <param name="len2">The number of bytes to use from bytes2
+               /// </param>
+               /// <returns> The length of the common prefix of the two arrays.
+               /// </returns>
+               public static int BytesDifference(byte[] bytes1, int len1, byte[] bytes2, int len2)
+               {
+                       int len = len1 < len2?len1:len2;
+                       for (int i = 0; i < len; i++)
+                               if (bytes1[i] != bytes2[i])
+                                       return i;
+                       return len;
+               }
+               
+               /// <summary> Compares two strings, character by character, and returns the
+               /// first position where the two strings differ from one another.
+               /// 
+               /// </summary>
+               /// <param name="s1">The first string to compare
+               /// </param>
+               /// <param name="s2">The second string to compare
+               /// </param>
+               /// <returns> The first position where the two strings differ, or the length of the shorter string if one is a prefix of the other.
+               /// </returns>
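+               /// <remarks> Hypothetical example: StringDifference("lucene", "lucent")
+               /// returns 5, the length of the common prefix "lucen".
+               /// </remarks>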
+               public static int StringDifference(System.String s1, System.String s2)
+               {
+                       int len1 = s1.Length;
+                       int len2 = s2.Length;
+                       int len = len1 < len2?len1:len2;
+                       for (int i = 0; i < len; i++)
+                       {
+                               if (s1[i] != s2[i])
+                               {
+                                       return i;
+                               }
+                       }
+                       return len;
+               }
+               
+               private StringHelper()
+               {
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/StringInterner.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/StringInterner.cs
new file mode 100644
index 0000000..87b9ee7
--- /dev/null
@@ -0,0 +1,44 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> Subclasses of StringInterner are required to
+       /// return the same single String object for all equal strings.
+       /// Depending on the implementation, this may not be
+       /// the same object as that returned by String.intern().
+       /// 
+       /// This StringInterner base class simply delegates to String.intern().
+       /// </summary>
+       public class StringInterner
+       {
+               /// <summary>Returns a single object instance for each equal string. </summary>
+               public virtual System.String Intern(System.String s)
+               {
+                       return String.Intern(s);
+               }
+               
+               /// <summary>Returns a single object instance for each equal string. </summary>
+               public virtual System.String Intern(char[] arr, int offset, int len)
+               {
+                       return Intern(new System.String(arr, offset, len));
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/ToStringUtils.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/ToStringUtils.cs
new file mode 100644
index 0000000..f672cd4
--- /dev/null
@@ -0,0 +1,45 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       /// <summary> Helper methods to ease implementing {@link Object#toString()}.</summary>
+       public class ToStringUtils
+       {
+               /// <summary>for printing boost only if not 1.0 </summary>
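+               /// <remarks> Hypothetical examples: Boost(2.0f) yields "^2.0",
+               /// Boost(0.75f) yields "^0.75", and Boost(1.0f) yields "".
+               /// </remarks>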
+               public static System.String Boost(float boost)
+               {
+                       if (boost != 1.0f)
+                       {
+                               // Use "N.0" when the boost is integral, otherwise the full value,
+                               // always with '.' as the decimal separator regardless of culture.
+                               float boostAsLong = (long) boost;
+                               if (boostAsLong == boost)
+                                       return "^" + boost.ToString(".0").Replace(System.Globalization.CultureInfo.CurrentCulture.NumberFormat.NumberDecimalSeparator, ".");
+                               return "^" + boost.ToString().Replace(System.Globalization.CultureInfo.CurrentCulture.NumberFormat.NumberDecimalSeparator, ".");
+                       }
+                       else
+                               return "";
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/UnicodeUtil.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/UnicodeUtil.cs
new file mode 100644
index 0000000..d1ea88e
--- /dev/null
@@ -0,0 +1,508 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+       
+       /*
+       * Some of this code came from the excellent Unicode
+       * conversion examples from:
+       *
+       *   http://www.unicode.org/Public/PROGRAMS/CVTUTF
+       *
+       * Full Copyright for that code follows:*/
+       
+       /*
+       * Copyright 2001-2004 Unicode, Inc.
+       * 
+       * Disclaimer
+       * 
+       * This source code is provided as is by Unicode, Inc. No claims are
+       * made as to fitness for any particular purpose. No warranties of any
+       * kind are expressed or implied. The recipient agrees to determine
+       * applicability of information provided. If this file has been
+       * purchased on magnetic or optical media from Unicode, Inc., the
+       * sole remedy for any claim will be exchange of defective media
+       * within 90 days of receipt.
+       * 
+       * Limitations on Rights to Redistribute This Code
+       * 
+       * Unicode, Inc. hereby grants the right to freely use the information
+       * supplied in this file in the creation of products supporting the
+       * Unicode Standard, and to make copies of this file in any form
+       * for internal or external distribution as long as this notice
+       * remains attached.
+       */
+       
+       /// <summary> Class to encode Java's UTF16 char[] into UTF8 byte[]
+       /// without always allocating a new byte[] as
+       /// String.getBytes("UTF-8") does.
+       /// 
+       /// <p/><b>WARNING</b>: This API is new and experimental and
+       /// may suddenly change. <p/>
+       /// </summary>
+       
+       sealed public class UnicodeUtil
+       {
+               
+               public const int UNI_SUR_HIGH_START = 0xD800;
+               public const int UNI_SUR_HIGH_END = 0xDBFF;
+               public const int UNI_SUR_LOW_START = 0xDC00;
+               public const int UNI_SUR_LOW_END = 0xDFFF;
+               public const int UNI_REPLACEMENT_CHAR = 0xFFFD;
+               
+               private const long UNI_MAX_BMP = 0x0000FFFF;
+               
+               private const int HALF_BASE = 0x0010000;
+               private const long HALF_SHIFT = 10;
+               private const long HALF_MASK = 0x3FFL;
+               
+               public sealed class UTF8Result
+               {
+                       public byte[] result = new byte[10];
+                       public int length;
+                       
+                       public void  SetLength(int newLength)
+                       {
+                               if (result.Length < newLength)
+                               {
+                                       byte[] newArray = new byte[(int) (1.5 * newLength)];
+                                       Array.Copy(result, 0, newArray, 0, length);
+                                       result = newArray;
+                               }
+                               length = newLength;
+                       }
+               }
+               
+               public sealed class UTF16Result
+               {
+                       public char[] result = new char[10];
+                       public int[] offsets = new int[10];
+                       public int length;
+                       
+                       public void  SetLength(int newLength)
+                       {
+                               if (result.Length < newLength)
+                               {
+                                       char[] newArray = new char[(int) (1.5 * newLength)];
+                                       Array.Copy(result, 0, newArray, 0, length);
+                                       result = newArray;
+                               }
+                               length = newLength;
+                       }
+                       
+                       public void  CopyText(UTF16Result other)
+                       {
+                               SetLength(other.length);
+                               Array.Copy(other.result, 0, result, 0, length);
+                       }
+               }
+               
+               /// <summary>Encode characters from a char[] source, starting at
+               /// offset and stopping when the character 0xffff is seen.
+               /// The number of bytes written is recorded in result.length.
+               /// </summary>
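+               /// <remarks> Hypothetical example: the char '\u00e9' (é) is written as the two
+               /// bytes 0xC3 0xA9; a valid surrogate pair is written as four bytes.
+               /// </remarks>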
+               public static void  UTF16toUTF8(char[] source, int offset, UTF8Result result)
+               {
+                       
+                       int upto = 0;
+                       int i = offset;
+                       byte[] out_Renamed = result.result;
+                       
+                       while (true)
+                       {
+                               
+                               int code = (int) source[i++];
+                               
+                               if (upto + 4 > out_Renamed.Length)
+                               {
+                                       byte[] newOut = new byte[2 * out_Renamed.Length];
+                                       System.Diagnostics.Debug.Assert(newOut.Length >= upto + 4);
+                                       Array.Copy(out_Renamed, 0, newOut, 0, upto);
+                                       result.result = out_Renamed = newOut;
+                               }
+                               if (code < 0x80)
+                                       out_Renamed[upto++] = (byte) code;
+                               else if (code < 0x800)
+                               {
+                                       out_Renamed[upto++] = (byte) (0xC0 | (code >> 6));
+                                       out_Renamed[upto++] = (byte) (0x80 | (code & 0x3F));
+                               }
+                               else if (code < 0xD800 || code > 0xDFFF)
+                               {
+                                       if (code == 0xffff)
+                                       // END
+                                               break;
+                                       out_Renamed[upto++] = (byte) (0xE0 | (code >> 12));
+                                       out_Renamed[upto++] = (byte) (0x80 | ((code >> 6) & 0x3F));
+                                       out_Renamed[upto++] = (byte) (0x80 | (code & 0x3F));
+                               }
+                               else
+                               {
+                                       // surrogate pair
+                                       // confirm valid high surrogate
+                                       if (code < 0xDC00 && source[i] != 0xffff)
+                                       {
+                                               int utf32 = (int) source[i];
+                                               // confirm valid low surrogate and write pair
+                                               if (utf32 >= 0xDC00 && utf32 <= 0xDFFF)
+                                               {
+                                                       utf32 = ((code - 0xD7C0) << 10) + (utf32 & 0x3FF);
+                                                       i++;
+                                                       out_Renamed[upto++] = (byte) (0xF0 | (utf32 >> 18));
+                                                       out_Renamed[upto++] = (byte) (0x80 | ((utf32 >> 12) & 0x3F));
+                                                       out_Renamed[upto++] = (byte) (0x80 | ((utf32 >> 6) & 0x3F));
+                                                       out_Renamed[upto++] = (byte) (0x80 | (utf32 & 0x3F));
+                                                       continue;
+                                               }
+                                       }
+                                       // replace unpaired surrogate or out-of-order low surrogate
+                                       // with substitution character
+                                       out_Renamed[upto++] = (byte) (0xEF);
+                                       out_Renamed[upto++] = (byte) (0xBF);
+                                       out_Renamed[upto++] = (byte) (0xBD);
+                               }
+                       }
+                       //assert matches(source, offset, i-offset-1, out, upto);
+                       result.length = upto;
+               }
+               
+               /// <summary>Encode characters from a char[] source, starting at
+               /// offset for length chars.  The number of bytes written is
+               /// recorded in result.length.
+               /// </summary>
+               public static void  UTF16toUTF8(char[] source, int offset, int length, UTF8Result result)
+               {
+                       
+                       int upto = 0;
+                       int i = offset;
+                       int end = offset + length;
+                       byte[] out_Renamed = result.result;
+                       
+                       while (i < end)
+                       {
+                               
+                               int code = (int) source[i++];
+                               
+                               if (upto + 4 > out_Renamed.Length)
+                               {
+                                       byte[] newOut = new byte[2 * out_Renamed.Length];
+                                       System.Diagnostics.Debug.Assert(newOut.Length >= upto + 4);
+                                       Array.Copy(out_Renamed, 0, newOut, 0, upto);
+                                       result.result = out_Renamed = newOut;
+                               }
+                               if (code < 0x80)
+                                       out_Renamed[upto++] = (byte) code;
+                               else if (code < 0x800)
+                               {
+                                       out_Renamed[upto++] = (byte) (0xC0 | (code >> 6));
+                                       out_Renamed[upto++] = (byte) (0x80 | (code & 0x3F));
+                               }
+                               else if (code < 0xD800 || code > 0xDFFF)
+                               {
+                                       out_Renamed[upto++] = (byte) (0xE0 | (code >> 12));
+                                       out_Renamed[upto++] = (byte) (0x80 | ((code >> 6) & 0x3F));
+                                       out_Renamed[upto++] = (byte) (0x80 | (code & 0x3F));
+                               }
+                               else
+                               {
+                                       // surrogate pair
+                                       // confirm valid high surrogate
+                                       if (code < 0xDC00 && i < end && source[i] != 0xffff)
+                                       {
+                                               int utf32 = (int) source[i];
+                                               // confirm valid low surrogate and write pair
+                                               if (utf32 >= 0xDC00 && utf32 <= 0xDFFF)
+                                               {
+                                                       utf32 = ((code - 0xD7C0) << 10) + (utf32 & 0x3FF);
+                                                       i++;
+                                                       out_Renamed[upto++] = (byte) (0xF0 | (utf32 >> 18));
+                                                       out_Renamed[upto++] = (byte) (0x80 | ((utf32 >> 12) & 0x3F));
+                                                       out_Renamed[upto++] = (byte) (0x80 | ((utf32 >> 6) & 0x3F));
+                                                       out_Renamed[upto++] = (byte) (0x80 | (utf32 & 0x3F));
+                                                       continue;
+                                               }
+                                       }
+                                       // replace unpaired surrogate or out-of-order low surrogate
+                                       // with substitution character
+                                       out_Renamed[upto++] = (byte) (0xEF);
+                                       out_Renamed[upto++] = (byte) (0xBF);
+                                       out_Renamed[upto++] = (byte) (0xBD);
+                               }
+                       }
+                       //assert matches(source, offset, length, out, upto);
+                       result.length = upto;
+               }
+               
+               /// <summary>Encode characters from this String, starting at offset
+               /// for length characters.  The number of bytes written is
+               /// recorded in result.length.
+               /// </summary>
+               public static void  UTF16toUTF8(System.String s, int offset, int length, UTF8Result result)
+               {
+                       int end = offset + length;
+                       
+                       byte[] out_Renamed = result.result;
+                       
+                       int upto = 0;
+                       for (int i = offset; i < end; i++)
+                       {
+                               int code = (int) s[i];
+                               
+                               if (upto + 4 > out_Renamed.Length)
+                               {
+                                       byte[] newOut = new byte[2 * out_Renamed.Length];
+                                       System.Diagnostics.Debug.Assert(newOut.Length >= upto + 4);
+                                       Array.Copy(out_Renamed, 0, newOut, 0, upto);
+                                       result.result = out_Renamed = newOut;
+                               }
+                               if (code < 0x80)
+                                       out_Renamed[upto++] = (byte) code;
+                               else if (code < 0x800)
+                               {
+                                       out_Renamed[upto++] = (byte) (0xC0 | (code >> 6));
+                                       out_Renamed[upto++] = (byte) (0x80 | (code & 0x3F));
+                               }
+                               else if (code < 0xD800 || code > 0xDFFF)
+                               {
+                                       out_Renamed[upto++] = (byte) (0xE0 | (code >> 12));
+                                       out_Renamed[upto++] = (byte) (0x80 | ((code >> 6) & 0x3F));
+                                       out_Renamed[upto++] = (byte) (0x80 | (code & 0x3F));
+                               }
+                               else
+                               {
+                                       // surrogate pair
+                                       // confirm valid high surrogate
+                                       if (code < 0xDC00 && (i < end - 1))
+                                       {
+                                               int utf32 = (int) s[i + 1];
+                                               // confirm valid low surrogate and write pair
+                                               if (utf32 >= 0xDC00 && utf32 <= 0xDFFF)
+                                               {
+                                                       utf32 = ((code - 0xD7C0) << 10) + (utf32 & 0x3FF);
+                                                       i++;
+                                                       out_Renamed[upto++] = (byte) (0xF0 | (utf32 >> 18));
+                                                       out_Renamed[upto++] = (byte) (0x80 | ((utf32 >> 12) & 0x3F));
+                                                       out_Renamed[upto++] = (byte) (0x80 | ((utf32 >> 6) & 0x3F));
+                                                       out_Renamed[upto++] = (byte) (0x80 | (utf32 & 0x3F));
+                                                       continue;
+                                               }
+                                       }
+                                       // replace unpaired surrogate or out-of-order low surrogate
+                                       // with substitution character
+                                       out_Renamed[upto++] = (byte) (0xEF);
+                                       out_Renamed[upto++] = (byte) (0xBF);
+                                       out_Renamed[upto++] = (byte) (0xBD);
+                               }
+                       }
+                       //assert matches(s, offset, length, out, upto);
+                       result.length = upto;
+               }
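+		// A minimal usage sketch (assumes UTF8Result, declared elsewhere in
+		// this file, exposes a growable byte[] `result` and an int `length`,
+		// as the code above relies on):
+		//
+		//   var utf8 = new UTF8Result ();
+		//   UTF16toUTF8 ("abc", 0, 3, utf8);
+		//   // utf8.result now holds utf8.length encoded bytes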
+               
+		/// <summary>Convert UTF8 bytes into UTF16 characters.  If offset
+		/// is non-zero, conversion starts at that point in utf8,
+		/// re-using the results from the previous call
+		/// up until offset.
+		/// </summary>
+               public static void  UTF8toUTF16(byte[] utf8, int offset, int length, UTF16Result result)
+               {
+                       
+                       int end = offset + length;
+                       char[] out_Renamed = result.result;
+                       if (result.offsets.Length <= end)
+                       {
+                               int[] newOffsets = new int[2 * end];
+                               Array.Copy(result.offsets, 0, newOffsets, 0, result.offsets.Length);
+                               result.offsets = newOffsets;
+                       }
+                       int[] offsets = result.offsets;
+                       
+                       // If incremental decoding fell in the middle of a
+                       // single unicode character, rollback to its start:
+                       int upto = offset;
+                       while (offsets[upto] == - 1)
+                               upto--;
+                       
+                       int outUpto = offsets[upto];
+                       
+                       // Pre-allocate for worst case 1-for-1
+                       if (outUpto + length >= out_Renamed.Length)
+                       {
+                               char[] newOut = new char[2 * (outUpto + length)];
+                               Array.Copy(out_Renamed, 0, newOut, 0, outUpto);
+                               result.result = out_Renamed = newOut;
+                       }
+                       
+                       while (upto < end)
+                       {
+                               
+                               int b = utf8[upto] & 0xff;
+                               int ch;
+                               
+                               offsets[upto++] = outUpto;
+                               
+                               if (b < 0xc0)
+                               {
+                                       System.Diagnostics.Debug.Assert(b < 0x80);
+                                       ch = b;
+                               }
+                               else if (b < 0xe0)
+                               {
+                                       ch = ((b & 0x1f) << 6) + (utf8[upto] & 0x3f);
+                                       offsets[upto++] = - 1;
+                               }
+                               else if (b < 0xf0)
+                               {
+                                       ch = ((b & 0xf) << 12) + ((utf8[upto] & 0x3f) << 6) + (utf8[upto + 1] & 0x3f);
+                                       offsets[upto++] = - 1;
+                                       offsets[upto++] = - 1;
+                               }
+                               else
+                               {
+                                       System.Diagnostics.Debug.Assert(b < 0xf8);
+                                       ch = ((b & 0x7) << 18) + ((utf8[upto] & 0x3f) << 12) + ((utf8[upto + 1] & 0x3f) << 6) + (utf8[upto + 2] & 0x3f);
+                                       offsets[upto++] = - 1;
+                                       offsets[upto++] = - 1;
+                                       offsets[upto++] = - 1;
+                               }
+                               
+                               if (ch <= UNI_MAX_BMP)
+                               {
+                                       // target is a character <= 0xFFFF
+                                       out_Renamed[outUpto++] = (char) ch;
+                               }
+                               else
+                               {
+					// target is a character in range 0x10000 - 0x10FFFF
+                                       int chHalf = ch - HALF_BASE;
+                                       out_Renamed[outUpto++] = (char) ((chHalf >> (int) HALF_SHIFT) + UNI_SUR_HIGH_START);
+                                       out_Renamed[outUpto++] = (char) ((chHalf & HALF_MASK) + UNI_SUR_LOW_START);
+                               }
+                       }
+                       
+                       offsets[upto] = outUpto;
+                       result.length = outUpto;
+               }
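+		// Illustrative round-trip sketch (assumes the UTF8Result/UTF16Result
+		// helpers and their default buffer/offset initialization declared
+		// elsewhere in this file):
+		//
+		//   var bytes = new UTF8Result ();
+		//   UTF16toUTF8 ("abc", 0, 3, bytes);
+		//   var chars = new UTF16Result ();
+		//   UTF8toUTF16 (bytes.result, 0, bytes.length, chars);
+		//   // new string (chars.result, 0, chars.length) == "abc"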
+               
+               // Only called from assert
+               /*
+               private static boolean matches(char[] source, int offset, int length, byte[] result, int upto) {
+               try {
+               String s1 = new String(source, offset, length);
+               String s2 = new String(result, 0, upto, "UTF-8");
+               if (!s1.equals(s2)) {
+               //System.out.println("DIFF: s1 len=" + s1.length());
+               //for(int i=0;i<s1.length();i++)
+               //  System.out.println("    " + i + ": " + (int) s1.charAt(i));
+               //System.out.println("s2 len=" + s2.length());
+               //for(int i=0;i<s2.length();i++)
+               //  System.out.println("    " + i + ": " + (int) s2.charAt(i));
+               
+               // If the input string was invalid, then the
+               // difference is OK
+               if (!validUTF16String(s1))
+               return true;
+               
+               return false;
+               }
+               return s1.equals(s2);
+               } catch (UnsupportedEncodingException uee) {
+               return false;
+               }
+               }
+               
+               // Only called from assert
+               private static boolean matches(String source, int offset, int length, byte[] result, int upto) {
+               try {
+               String s1 = source.substring(offset, offset+length);
+               String s2 = new String(result, 0, upto, "UTF-8");
+               if (!s1.equals(s2)) {
+               // Allow a difference if s1 is not valid UTF-16
+               
+               //System.out.println("DIFF: s1 len=" + s1.length());
+               //for(int i=0;i<s1.length();i++)
+               //  System.out.println("    " + i + ": " + (int) s1.charAt(i));
+               //System.out.println("  s2 len=" + s2.length());
+               //for(int i=0;i<s2.length();i++)
+               //  System.out.println("    " + i + ": " + (int) s2.charAt(i));
+               
+               // If the input string was invalid, then the
+               // difference is OK
+               if (!validUTF16String(s1))
+               return true;
+               
+               return false;
+               }
+               return s1.equals(s2);
+               } catch (UnsupportedEncodingException uee) {
+               return false;
+               }
+               }
+               
+               public static final boolean validUTF16String(String s) {
+               final int size = s.length();
+               for(int i=0;i<size;i++) {
+               char ch = s.charAt(i);
+               if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
+               if (i < size-1) {
+               i++;
+               char nextCH = s.charAt(i);
+               if (nextCH >= UNI_SUR_LOW_START && nextCH <= UNI_SUR_LOW_END) {
+               // Valid surrogate pair
+               } else
+               // Unmatched high surrogate
+               return false;
+               } else
+               // Unmatched high surrogate
+               return false;
+               } else if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END)
+               // Unmatched low surrogate
+               return false;
+               }
+               
+               return true;
+               }
+               
+               public static final boolean validUTF16String(char[] s, int size) {
+               for(int i=0;i<size;i++) {
+               char ch = s[i];
+               if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
+               if (i < size-1) {
+               i++;
+               char nextCH = s[i];
+               if (nextCH >= UNI_SUR_LOW_START && nextCH <= UNI_SUR_LOW_END) {
+               // Valid surrogate pair
+               } else
+               return false;
+               } else
+               return false;
+               } else if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END)
+               // Unmatched low surrogate
+               return false;
+               }
+               
+               return true;
+               }
+               */
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Version.cs b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/Util/Version.cs
new file mode 100644 (file)
index 0000000..c75e7cd
--- /dev/null
@@ -0,0 +1,84 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+
+namespace Mono.Lucene.Net.Util
+{
+       
+	/// <summary> Used by certain classes to match version compatibility
+       /// across releases of Lucene.
+    ///  <p/>
+    ///  <b>WARNING</b>: When changing the version parameter
+    ///  that you supply to components in Lucene, do not simply
+    ///  change the version at search-time, but instead also adjust
+    ///  your indexing code to match, and re-index.
+       /// </summary>
+       [Serializable]
+       public sealed class Version:Parameter
+       {
+        /// <summary>
+               /// <p/><b>WARNING</b>: if you use this setting, and then
+               /// upgrade to a newer release of Lucene, sizable changes
+               /// may happen.  If precise back compatibility is important
+               /// then you should instead explicitly specify an actual
+               /// version.
+        /// If you use this constant then you may need to
+        /// <b>re-index all of your documents</b> when upgrading
+        /// Lucene, as the way text is indexed may have changed.
+        /// Additionally, you may need to <b>re-test your entire
+        /// application</b> to ensure it behaves as expected, as
+        /// some defaults may have changed and may break functionality
+        /// in your application.
+               /// </summary>
+        [Obsolete("Use an actual version instead.")]
+               public static readonly Version LUCENE_CURRENT = new Version("LUCENE_CURRENT", 0);
+               
+               /// <summary>Match settings and bugs in Lucene's 2.0 release. </summary>
+               public static readonly Version LUCENE_20 = new Version("LUCENE_20", 2000);
+               
+               /// <summary>Match settings and bugs in Lucene's 2.1 release. </summary>
+               public static readonly Version LUCENE_21 = new Version("LUCENE_21", 2100);
+               
+               /// <summary>Match settings and bugs in Lucene's 2.2 release. </summary>
+               public static readonly Version LUCENE_22 = new Version("LUCENE_22", 2200);
+               
+               /// <summary>Match settings and bugs in Lucene's 2.3 release. </summary>
+               public static readonly Version LUCENE_23 = new Version("LUCENE_23", 2300);
+
+        /// <summary>Match settings and bugs in Lucene's 2.4 release. </summary>
+               public static readonly Version LUCENE_24 = new Version("LUCENE_24", 2400);
+
+        /// <summary>Match settings and bugs in Lucene's 2.9 release. 
+        /// Use this to get the latest & greatest settings, bug
+        /// fixes, etc, for Lucene.
+        /// </summary>
+               public static readonly Version LUCENE_29 = new Version("LUCENE_29", 2900);
+               
+               private int v;
+               
+               public Version(System.String name, int v):base(name)
+               {
+                       this.v = v;
+               }
+               
+               public bool OnOrAfter(Version other)
+               {
+                       return v == 0 || v >= other.v;
+               }
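+		// e.g. LUCENE_29.OnOrAfter (LUCENE_24) is true while the converse is
+		// false; LUCENE_CURRENT (v == 0) reports true against every release.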
+       }
+}
diff --git a/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/lucene.net.project.nuspec b/mcs/tools/monkeydoc/Lucene.Net/Lucene.Net/lucene.net.project.nuspec
new file mode 100644 (file)
index 0000000..297b9c5
--- /dev/null
@@ -0,0 +1,21 @@
+<?xml version="1.0"?>
+<package>
+  <metadata>
+    <id>Lucene.Net.Core</id>
+    <version>$version$</version>
+    <authors>$author$</authors>
+    <owners>The Apache Software Foundation</owners>
+    <licenseUrl>http://www.apache.org/licenses/LICENSE-2.0.html</licenseUrl>
+    <projectUrl>http://incubator.apache.org/lucene.net/</projectUrl>
+    <iconUrl>..\..\branding\logo\lucene-net-icon-128x128.png</iconUrl>
+    <requireLicenseAcceptance>false</requireLicenseAcceptance>
+    <description>
+	Lucene.Net Core: This contains only the core Lucene.Net assembly.
+	Lucene.Net is a port of the Lucene search engine library, written in C# and targeted at .NET runtime users.</description>
+    <copyright>Copyright 2011</copyright>
+    <tags>Tag1 Tag2</tags>
+    <dependencies>
+
+    </dependencies>
+  </metadata>
+</package>
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Makefile b/mcs/tools/monkeydoc/Makefile
new file mode 100644 (file)
index 0000000..782c8de
--- /dev/null
@@ -0,0 +1,140 @@
+thisdir = tools/monkeydoc
+SUBDIRS = 
+include ../../build/rules.make
+
+LIBRARY = monkeydoc.dll
+LIBRARY_PACKAGE = monkeydoc
+# Remove a bunch of "obsolete"-type warnings for Lucene.NET
+LOCAL_MCS_FLAGS = /nowarn:618,612,672,809
+
+JAY_FLAGS = -ct
+
+IMAGES = \
+       Resources/images/bc_bg.png              \
+       Resources/images/bc_separator.png       \
+       Resources/images/error.png              \
+       Resources/images/hatch.png              \
+       Resources/images/headerbg.png           \
+       Resources/images/help.png               \
+       Resources/images/house.png              \
+       Resources/images/members.png            \
+       Resources/images/namespace.png          \
+       Resources/images/privclass.png          \
+       Resources/images/privdelegate.png       \
+       Resources/images/privenumeration.png    \
+       Resources/images/privevent.png          \
+       Resources/images/privextension.png      \
+       Resources/images/privfield.png          \
+       Resources/images/privinterface.png      \
+       Resources/images/privmethod.png         \
+       Resources/images/privproperty.png       \
+       Resources/images/privstructure.png      \
+       Resources/images/protclass.png          \
+       Resources/images/protdelegate.png       \
+       Resources/images/protenumeration.png    \
+       Resources/images/protevent.png          \
+       Resources/images/protextension.png      \
+       Resources/images/protfield.png          \
+       Resources/images/protinterface.png      \
+       Resources/images/protmethod.png         \
+       Resources/images/protproperty.png       \
+       Resources/images/protstructure.png      \
+       Resources/images/pubclass.png           \
+       Resources/images/pubdelegate.png        \
+       Resources/images/pubenumeration.png     \
+       Resources/images/pubevent.png           \
+       Resources/images/pubextension.png       \
+       Resources/images/pubfield.png           \
+       Resources/images/pubinterface.png       \
+       Resources/images/pubmethod.png          \
+       Resources/images/pubproperty.png        \
+       Resources/images/pubstructure.png       \
+       Resources/images/reference.png          \
+       Resources/images/treebg.png             
+
+IMAGE_RESOURCE_COMMAND = $(foreach file,$(IMAGES),/resource:$(file),$(notdir $(file)))
+
+RESOURCE_FILES = \
+       ../../docs/monodoc.xml            \
+       Resources/base.css                \
+       Resources/ecmaspec-html-css.xsl   \
+       Resources/ecmaspec-html.xsl       \
+       Resources/ecmaspec.css            \
+       Resources/helper.js               \
+       Resources/home.html               \
+       Resources/Lminus.gif              \
+       Resources/Lplus.gif               \
+       Resources/mdoc-html-format.xsl    \
+       Resources/mdoc-html-utils.xsl     \
+       Resources/mdoc-sections-css.xsl   \
+       Resources/mdoc-sections.xsl       \
+       Resources/mono-ecma-css.xsl       \
+       Resources/mono-ecma-impl.xsl      \
+       Resources/mono-ecma.css           \
+       Resources/mono-ecma.xsl                 \
+       Resources/toc-html.xsl           \
+       $(IMAGES)
+
+EXTRA_DISTFILES = \
+       monkeydoc.dll.config.in   \
+       $(RESOURCE_FILES)
+
+LIB_MCS_FLAGS = \
+       /codepage:utf8                                                    \
+       /nowarn:169,164,162,168,219,618,612                                      \
+       /r:Commons.Xml.Relaxng                                            \
+       /resource:../../docs/monodoc.xml,monodoc.xml                      \
+       /resource:Resources/base.css,base.css                             \
+       /resource:Resources/ecmaspec-html-css.xsl,ecmaspec-html-css.xsl   \
+       /resource:Resources/ecmaspec-html.xsl,ecmaspec-html.xsl           \
+       /resource:Resources/ecmaspec.css,ecmaspec.css                     \
+       /resource:Resources/helper.js,helper.js                           \
+       /resource:Resources/home.html,home.html                           \
+       /resource:Resources/Lminus.gif,Lminus.gif                         \
+       /resource:Resources/Lplus.gif,Lplus.gif                           \
+       /resource:Resources/mdoc-html-format.xsl,mdoc-html-format.xsl     \
+       /resource:Resources/mdoc-html-utils.xsl,mdoc-html-utils.xsl       \
+       /resource:Resources/mdoc-sections-css.xsl,mdoc-sections-css.xsl   \
+       /resource:Resources/mdoc-sections.xsl,mdoc-sections.xsl           \
+       /resource:Resources/mono-ecma-css.xsl,mono-ecma-css.xsl           \
+       /resource:Resources/mono-ecma-impl.xsl,mono-ecma-impl.xsl         \
+       /resource:Resources/mono-ecma.css,mono-ecma.css                   \
+       /resource:Resources/mono-ecma.xsl,mono-ecma.xsl                   \
+       /resource:Resources/toc-html.xsl,toc-html.xsl                     \
+       $(IMAGE_RESOURCE_COMMAND)                                         \
+       /r:ICSharpCode.SharpZipLib                                        \
+       /r:$(corlib)                                                      \
+       /r:System.dll                                                     \
+       /r:System.Core.dll                                                \
+       /r:System.Xml.dll                                                 \
+       /r:System.Xml.Linq.dll                                                 \
+       /r:System.Configuration.dll
+
+TEST_MCS_FLAGS = /r:System.Core.dll
+
+DOC_SOURCE_DIRS = \
+       ../../docs \
+       ../../../docs
+
+DOC_SOURCES = $(foreach dir,$(DOC_SOURCE_DIRS),$(wildcard $(dir)/*.source $(dir)/*.tree $(dir)/*.zip))
+
+include ../../build/library.make
+
+$(the_lib): Makefile $(RESOURCE_FILES)
+
+all-local: $(the_lib).config Monkeydoc.Ecma/EcmaUrlParser.cs
+
+test-local: setup-doc-sources
+
+$(the_lib).config: Makefile
+       sed 's,@monodoc_refdir@,$(mono_libdir)/monodoc,g' monkeydoc.dll.config.in > $@
+
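+# Note: EcmaUrlParser.cs is generated from the jay grammar below; jay reads
+# the C# skeleton on stdin and the .jay grammar file as its argument.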
+Monkeydoc.Ecma/EcmaUrlParser.cs: Monkeydoc.Ecma/EcmaUrlParser.jay $(topdir)/jay/skeleton.cs
+       $(topdir)/jay/jay $(JAY_FLAGS) < $(topdir)/jay/skeleton.cs $< > jay-tmp.out && mv jay-tmp.out $@
+
+parser.exe: Monkeydoc.Ecma/EcmaUrlParser.cs Monkeydoc.Ecma/EcmaUrlTokenizer.cs Monkeydoc.Ecma/EcmaUrlParserDriver.cs Monkeydoc.Ecma/EcmaDesc.cs
+       mcs /out:$@ /debug $^
+
+setup-doc-sources: $(DOC_SOURCES)
+       mkdir -p ./Test/monodoc/sources/
+       cp $(DOC_SOURCES) ./Test/monodoc/sources/
diff --git a/mcs/tools/monkeydoc/Monkeydoc.Ecma/EcmaDesc.cs b/mcs/tools/monkeydoc/Monkeydoc.Ecma/EcmaDesc.cs
new file mode 100644 (file)
index 0000000..eff84b9
--- /dev/null
@@ -0,0 +1,304 @@
+using System;
+using System.Linq;
+using System.Text;
+using System.Collections.Generic;
+
+namespace Monkeydoc.Ecma
+{
+	/* Some properties might not be filled/meaningful depending on the kind,
+	 * e.g. a namespace EcmaDesc won't have a valid TypeName
+	 */
+       public class EcmaDesc : IEquatable<EcmaDesc>
+       {
+               public enum Kind
+               {
+                       Type,
+                       Constructor,
+                       Method,
+                       Namespace,
+                       Field,
+                       Property,
+                       Event,
+                       Operator
+               }
+
+               public enum Mod
+               {
+                       Normal,
+                       Pointer,
+                       Ref,
+                       Out
+               }
+
+               public enum Format
+               {
+                       WithArgs,
+                       WithoutArgs
+               }
+
+               public Kind DescKind {
+                       get;
+                       set;
+               }
+
+               public Mod DescModifier {
+                       get;
+                       set;
+               }
+
+               public string Namespace {
+                       get;
+                       set;
+               }
+
+               public string TypeName {
+                       get;
+                       set;
+               }
+
+               public string MemberName {
+                       get;
+                       set;
+               }
+
+               public EcmaDesc NestedType {
+                       get;
+                       set;
+               }
+
+		/* A list of the array dimensions attached to this type.
+		 * The list count corresponds to the number of nested
+		 * array definitions (jagged arrays); the value of each
+		 * list item is the number of dimensions attached to that
+		 * array definition instance.
+		 */
+               public IList<int> ArrayDimensions {
+                       get;
+                       set;
+               }
+
+		/* Depending on the form of the URL, we might not have the types
+		 * of the generic arguments, only how many the type/member has,
+		 * i.e. when the count is specified with a backtick.
+		 */
+               public IList<EcmaDesc> GenericTypeArguments {
+                       get;
+                       set;
+               }
+
+               public IList<EcmaDesc> GenericMemberArguments {
+                       get;
+                       set;
+               }
+
+               public IList<EcmaDesc> MemberArguments {
+                       get;
+                       set;
+               }
+
+		/* This indicates that we actually want an inner part of the EcmaDesc,
+		 * i.e. for a T: URL we could want the members (*), ctors (C), methods (M), ...
+		 */
+               public char Etc {
+                       get;
+                       set;
+               }
+
+               public bool IsEtc {
+                       get {
+                               return Etc != (char)0;
+                       }
+               }
+
+		/* EtcFilter is only valid in some IsEtc cases, when the inner part needs
+		 * to be further filtered, e.g. when we want a listing of a type's
+		 * overloads of Equals
+		 */
+               public string EtcFilter {
+                       get;
+                       set;
+               }
+
+               /* When a member is an explicit implementation of an interface member, we register
+                * the member EcmaDesc with its interface parent here
+                */
+               public EcmaDesc ExplicitImplMember {
+                       get;
+                       set;
+               }
+
+               // Returns the TypeName and the generic/inner type information if existing
+               public string ToCompleteTypeName (char innerTypeSeparator = '.')
+               {
+                       var result = TypeName;
+                       if (GenericTypeArguments != null)
+                               result += FormatGenericArgs (GenericTypeArguments);
+                       if (NestedType != null)
+                               result += innerTypeSeparator + NestedType.ToCompleteTypeName ();
+                       if (ArrayDimensions != null && ArrayDimensions.Count > 0)
+                               result += ArrayDimensions.Select (dim => "[" + new string (',', dim - 1) + "]").Aggregate (string.Concat);
+
+                       return result;
+               }
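+		// e.g. a desc with TypeName "List", one generic argument System.Int32
+		// and a single one-dimensional array definition renders here as
+		// "List<System.Int32>[]" (illustrative values).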
+
+               // Returns the member name with its generic types if existing
+               public string ToCompleteMemberName (Format format)
+               {
+			/* We special-case two things:
+			 *   - Explicit member implementations, which prefix the member with a full type specification
+			 *   - Conversion operators, which are exposed as normal methods but get specific captioning at the end
+			 */
+                       if (ExplicitImplMember != null) {
+                               var impl = ExplicitImplMember;
+                               return impl.FormattedNamespace + impl.ToCompleteTypeName () + "." + impl.ToCompleteMemberName (format);
+                       } else if (format == Format.WithArgs && DescKind == Kind.Operator && MemberName.EndsWith ("Conversion")) {
+                               var type1 = MemberArguments[0].FormattedNamespace + MemberArguments[0].ToCompleteTypeName () + ModToString (MemberArguments[0]);
+                               var type2 = MemberArguments[1].FormattedNamespace + MemberArguments[1].ToCompleteTypeName () + ModToString (MemberArguments[1]);
+                               return string.Format ("{0} to {1}", type1, type2);
+                       }
+
+                       var result = IsEtc && !string.IsNullOrEmpty (EtcFilter) ? EtcFilter : MemberName;
+
+                       // Temporary hack for monodoc produced inner type ctor
+                       if (DescKind == Kind.Constructor && NestedType != null)
+                               result = ToCompleteTypeName ();
+
+                       if (GenericMemberArguments != null)
+                               result += FormatGenericArgs (GenericMemberArguments);
+
+                       if (format == Format.WithArgs) {
+                               result += '(';
+                               if (MemberArguments != null && MemberArguments.Count > 0) {
+                                       var args = MemberArguments.Select (a => FormatNamespace (a) + a.ToCompleteTypeName ('+') + ModToString (a));
+                                       result += string.Join (",", args);
+                               }
+                               result += ')';
+                       }
+                       return result;
+               }
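+		// e.g. a method desc named "Add" taking a System.Int32 yields "Add"
+		// with Format.WithoutArgs and "Add(System.Int32)" with Format.WithArgs
+		// (illustrative values).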
+
+               public string ToEcmaCref ()
+               {
+                       var sb = new StringBuilder ();
+                       // Cref type
+                       sb.Append (DescKind.ToString ()[0]);
+                       // Create the rest
+                       ConstructCRef (sb);
+
+                       return sb.ToString ();
+               }
+
+               void ConstructCRef (StringBuilder sb)
+               {
+                       sb.Append (Namespace);
+                       if (DescKind == Kind.Namespace)
+                               return;
+
+                       sb.Append ('.');
+                       sb.Append (TypeName);
+                       if (GenericTypeArguments != null) {
+                               sb.Append ('<');
+                               foreach (var t in GenericTypeArguments)
+                                       t.ConstructCRef (sb);
+                               sb.Append ('>');
+                       }
+                       if (NestedType != null) {
+                               sb.Append ('+');
+                               NestedType.ConstructCRef (sb);
+                       }
+                       if (ArrayDimensions != null && ArrayDimensions.Count > 0) {
+                               for (int i = 0; i < ArrayDimensions.Count; i++) {
+                                       sb.Append ('[');
+                                       sb.Append (new string (',', ArrayDimensions[i] - 1));
+                                       sb.Append (']');
+                               }
+                       }
+                       if (DescKind == Kind.Type)
+                               return;
+
+                       if (MemberArguments != null) {
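+				// member arguments are not serialized into the cref yet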
+                               
+                       }
+               }
+
+               public override string ToString ()
+               {
+                       return string.Format ("({8}) {0}::{1}{2}{3}{7} {4}{5}{6} {9}",
+                                             Namespace,
+                                             TypeName,
+                                             FormatGenericArgsFull (GenericTypeArguments),
+                                             NestedType != null ? "+" + NestedType.ToString () : string.Empty,
+                                             MemberName ?? string.Empty,
+                                             FormatGenericArgsFull (GenericMemberArguments),
+                                             MemberArguments != null ? "(" + string.Join (",", MemberArguments.Select (m => m.ToString ())) + ")" : string.Empty,
+                                             ArrayDimensions != null && ArrayDimensions.Count > 0 ? ArrayDimensions.Select (dim => "[" + new string (',', dim - 1) + "]").Aggregate (string.Concat) : string.Empty,
+                                             DescKind.ToString ()[0],
+                                             Etc != 0 ? '(' + Etc.ToString () + ')' : string.Empty);
+                                             
+               }
+
+               public override bool Equals (object other)
+               {
+                       var otherDesc = other as EcmaDesc;
+                       return otherDesc != null && Equals (otherDesc);
+               }
+
+               public bool Equals (EcmaDesc other)
+               {
+			return other != null
+				&& DescKind == other.DescKind
+				&& TypeName == other.TypeName
+				&& Namespace == other.Namespace
+				&& MemberName == other.MemberName
+				&& (NestedType == null ? other.NestedType == null : NestedType.Equals (other.NestedType))
+				&& (ArrayDimensions == null ? other.ArrayDimensions == null : other.ArrayDimensions != null && ArrayDimensions.SequenceEqual (other.ArrayDimensions))
+				&& (GenericTypeArguments == null ? other.GenericTypeArguments == null : other.GenericTypeArguments != null && GenericTypeArguments.SequenceEqual (other.GenericTypeArguments))
+				&& (GenericMemberArguments == null ? other.GenericMemberArguments == null : other.GenericMemberArguments != null && GenericMemberArguments.SequenceEqual (other.GenericMemberArguments))
+				&& (MemberArguments == null ? other.MemberArguments == null : other.MemberArguments != null && MemberArguments.SequenceEqual (other.MemberArguments))
+				&& Etc == other.Etc
+				&& EtcFilter == other.EtcFilter
+				&& (ExplicitImplMember == null ? other.ExplicitImplMember == null : ExplicitImplMember.Equals (other.ExplicitImplMember));
+               }
+
+               bool What (bool input)
+               {
+                       if (!input)
+                               throw new Exception ("Not equal");
+                       return input;
+               }
+
+               string FormatNamespace (EcmaDesc desc)
+               {
+                       return string.IsNullOrEmpty (desc.Namespace) ? string.Empty : desc.Namespace + ".";
+               }
+
+               string FormatGenericArgs (IEnumerable<EcmaDesc> genericArgs)
+               {
+                       return genericArgs != null ? "<" + string.Join (",", genericArgs.Select (t => FormatNamespace (t) + t.ToCompleteTypeName ())) + ">" : string.Empty;
+               }
+
+               string FormatGenericArgsFull (IEnumerable<EcmaDesc> genericArgs)
+               {
+                       return genericArgs != null ? "<" + string.Join (",", genericArgs.Select (t => t.ToString ())) + ">" : string.Empty;
+               }
+
+               string ModToString (EcmaDesc desc)
+               {
+                       switch (desc.DescModifier) {
+                       case Mod.Pointer:
+                               return "*";
+                       case Mod.Ref:
+                               return "&";
+                       case Mod.Out:
+                               return "@";
+                       default:
+                               return string.Empty;
+                       }
+               }
+
+               string FormattedNamespace {
+                       get {
+                               return !string.IsNullOrEmpty (Namespace) ? Namespace + "." : string.Empty;
+                       }
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc.Ecma/EcmaUrlParser.jay b/mcs/tools/monkeydoc/Monkeydoc.Ecma/EcmaUrlParser.jay
new file mode 100644 (file)
index 0000000..a1a1851
--- /dev/null
@@ -0,0 +1,288 @@
+%{
+using System.Text;
+using System.IO;
+using System;
+using System.Linq;
+using System.Collections.Generic;
+
+namespace Monkeydoc.Ecma
+{
+       public class EcmaUrlParser
+       {
+        int yacc_verbose_flag = 0;
+
+        public void IsValid (string input)
+        {
+            var reader = new StringReader (input);
+                       var lexer = new EcmaUrlTokenizer (reader);
+                       this.yyparse (lexer);
+        }
+
+        public EcmaDesc Parse (string input)
+        {
+            var reader = new StringReader (input);
+                       var lexer = new EcmaUrlTokenizer (reader);
+                       return (EcmaDesc)this.yyparse (lexer);
+        }
+
+        public bool TryParse (string input, out EcmaDesc desc)
+        {
+            desc = null;
+            try {
+                desc = Parse (input);
+            } catch {
+                return false;
+            }
+            return true;
+        }
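+        // Illustrative usage (hypothetical URL string):
+        //
+        //   var parser = new EcmaUrlParser ();
+        //   EcmaDesc desc;
+        //   if (parser.TryParse ("T:System.String", out desc))
+        //       Console.WriteLine (desc.TypeName); // prints "String"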
+
+        EcmaDesc CopyFromEcmaDesc (EcmaDesc dest, EcmaDesc orig)
+        {
+            if (string.IsNullOrEmpty (dest.Namespace))
+               dest.Namespace = orig.Namespace;
+            if (string.IsNullOrEmpty (dest.TypeName))
+               dest.TypeName = orig.TypeName;
+            if (dest.GenericTypeArguments == null)
+               dest.GenericTypeArguments = orig.GenericTypeArguments;
+            if (dest.NestedType == null)
+               dest.NestedType = orig.NestedType;
+            if (dest.ArrayDimensions == null)
+               dest.ArrayDimensions = orig.ArrayDimensions;
+            if (string.IsNullOrEmpty (dest.MemberName))
+               dest.MemberName = orig.MemberName;
+            if (dest.GenericMemberArguments == null)
+               dest.GenericMemberArguments = orig.GenericMemberArguments;
+            if (dest.MemberArguments == null)
+               dest.MemberArguments = orig.MemberArguments;
+            if (orig.IsEtc) {
+               dest.Etc = orig.Etc;
+               dest.EtcFilter = orig.EtcFilter;
+            }
+            if (orig.DescModifier != EcmaDesc.Mod.Normal)
+               dest.DescModifier = orig.DescModifier;
+            if (orig.ExplicitImplMember != null)
+               dest.ExplicitImplMember = orig.ExplicitImplMember;
+
+            return dest;
+        }
+
+        List<T> SafeReverse<T> (List<T> input)
+        {
+            if (input == null)
+               return null;
+            input.Reverse ();
+            return input;
+        }
+%}
+
+%token ERROR
+%token IDENTIFIER
+%token DIGIT
+%token DOT
+%token COMMA
+%token COLON
+%token INNER_TYPE_SEPARATOR
+%token OP_GENERICS_LT
+%token OP_GENERICS_GT
+%token OP_GENERICS_BACKTICK
+%token OP_OPEN_PAREN
+%token OP_CLOSE_PAREN
+%token OP_ARRAY_OPEN
+%token OP_ARRAY_CLOSE
+%token SLASH_SEPARATOR
+%token STAR
+%token REF_ARG
+%token OUT_ARG
+%token EXPLICIT_IMPL_SEP
+
+%start expression
+
+%%
+
+expression
+        : 'T' COLON type_expression { $$ = CopyFromEcmaDesc (new EcmaDesc { DescKind = EcmaDesc.Kind.Type }, (EcmaDesc)$3); }
+        | 'N' COLON namespace_expression { $$ = CopyFromEcmaDesc (new EcmaDesc { DescKind = EcmaDesc.Kind.Namespace }, (EcmaDesc)$3); }
+        | 'M' COLON method_expression { $$ = CopyFromEcmaDesc (new EcmaDesc { DescKind = EcmaDesc.Kind.Method }, (EcmaDesc)$3); }
+        | 'F' COLON simple_member_expression { $$ = CopyFromEcmaDesc (new EcmaDesc { DescKind = EcmaDesc.Kind.Field }, (EcmaDesc)$3); }
+        | 'C' COLON constructor_expression { $$ = CopyFromEcmaDesc (new EcmaDesc { DescKind = EcmaDesc.Kind.Constructor }, (EcmaDesc)$3); }
+        | 'P' COLON property_expression { $$ = CopyFromEcmaDesc (new EcmaDesc { DescKind = EcmaDesc.Kind.Property }, (EcmaDesc)$3); }
+        | 'E' COLON simple_member_expression { $$ = CopyFromEcmaDesc (new EcmaDesc { DescKind = EcmaDesc.Kind.Event }, (EcmaDesc)$3); }
+        | 'O' COLON operator_expression { $$ = CopyFromEcmaDesc (new EcmaDesc { DescKind = EcmaDesc.Kind.Operator }, (EcmaDesc)$3); }
+
+/* i.e. id.id.id or id */
+dot_expression
+        : IDENTIFIER { $$ = new List<string> { (string)$1 }; }
+        | IDENTIFIER DOT dot_expression { ((ICollection<string>)$3).Add ((string)$1); $$ = $3; }
+
+namespace_expression
+        : dot_expression { $$ = new EcmaDesc { Namespace = string.Join (".", ((IEnumerable<string>)$1).Reverse ()) }; }
+
+type_expression
+        : dot_expression type_expression_suffix {
+                         var dotExpr = ((List<string>)$1);
+                         dotExpr.Reverse ();
+                         var desc = $2 as EcmaDesc;
+                         desc.DescKind = EcmaDesc.Kind.Type;
+                         desc.Namespace = string.Join (".", dotExpr.Take (dotExpr.Count - 1));
+                         desc.TypeName = dotExpr.Last ();
+                         $$ = desc;
+                     }
+
+/* To be used in types with no namespaces attached to them like an inner type*/
+reduced_type_expression
+        : IDENTIFIER type_expression_suffix {
+                         var desc = $2 as EcmaDesc;
+                         desc.DescKind = EcmaDesc.Kind.Type;
+                         desc.TypeName = $1 as string;
+                         $$ = desc;
+                     }
+
+type_expression_suffix
+        : opt_generic_type_suffix opt_inner_type_description opt_array_definition opt_etc {
+                         bool nestedDescHasEtc = $2 != null && ((EcmaDesc)$2).IsEtc;
+                         EcmaDesc nestedType = (EcmaDesc)$2;
+                         $$ = new EcmaDesc {
+                            GenericTypeArguments = $1 as List<EcmaDesc>,
+                            NestedType = nestedType,
+                            ArrayDimensions = $3 as IList<int>,
+                            Etc = $4 != null ? ((Tuple<char, string>)$4).Item1 : nestedDescHasEtc ? nestedType.Etc : (char)0,
+                            EtcFilter = $4 != null ? ((Tuple<char, string>)$4).Item2 : nestedDescHasEtc ? nestedType.EtcFilter : null
+                         };
+                         if (nestedDescHasEtc) {
+                            nestedType.Etc = (char)0;
+                            nestedType.EtcFilter = null;
+                         }
+                     }
+
+opt_inner_type_description
+        : /* empty */ { $$ = null; }
+        | INNER_TYPE_SEPARATOR reduced_type_expression { $$ = $2; }
+
+opt_generic_type_suffix
+        : /* empty */ { $$ = null; }
+        | OP_GENERICS_BACKTICK DIGIT { $$ = Enumerable.Repeat<string> (null, (int)$2).ToList (); }
+        | OP_GENERICS_LT generic_type_arg_list OP_GENERICS_GT { $$ = $2; }
+
+generic_type_arg_list
+        : type_expression { $$ = new List<EcmaDesc> () { (EcmaDesc)$1 }; }
+        | generic_type_arg_list COMMA type_expression { ((List<EcmaDesc>)$1).Add ((EcmaDesc)$3); $$ = $1; }
+
+opt_array_definition
+        : /* empty */ { $$ = null; }
+        | OP_ARRAY_OPEN opt_array_definition_list OP_ARRAY_CLOSE opt_array_definition {
+                      var dims = ((IList<int>)$4) ?? new List<int> (2);
+                      dims.Add ((int)$2);
+                      $$ = dims;
+                }
+
+opt_array_definition_list
+        : /* empty */ { $$ = 1; }
+        | COMMA opt_array_definition_list { $$ = ((int)$2) + 1; }
+
+opt_etc
+        : /* empty */ { $$ = null; }
+        | SLASH_SEPARATOR etc_identifier { $$ = Tuple.Create<char, string> (((string)$2)[0], null); }
+        | SLASH_SEPARATOR etc_identifier SLASH_SEPARATOR reduced_member_expression { $$ = Tuple.Create<char, string> (((string)$2)[0], (string)$4); }
+/*        | SLASH_SEPARATOR etc_identifier SLASH_SEPARATOR IDENTIFIER opt_generic_type_suffix { $$ = Tuple.Create<char, string> (((string)$2)[0], (string)$4 + ($5 == null ? string.Empty : "<" + string.Join (",", ((IEnumerable<EcmaDesc>)$5).Select (t => t.ToCompleteTypeName ())) + ">")); } */
+
+etc_identifier
+        : STAR { $$ = "*"; }
+        | IDENTIFIER { $$ = $1; }
+
+method_expression
+        : type_expression DOT IDENTIFIER opt_generic_type_suffix opt_arg_list_suffix {
+                      var desc = $1 as EcmaDesc;
+                      desc.MemberName = $3 as string;
+                      desc.GenericMemberArguments = $4 as List<EcmaDesc>;
+                      desc.MemberArguments = SafeReverse ($5 as List<EcmaDesc>);
+                      $$ = desc;
+                }
+        | dot_expression opt_generic_type_suffix opt_arg_list_suffix {
+                      var dotExpr = ((List<string>)$1);
+                      $$ = new EcmaDesc {
+                           Namespace = string.Join (".", dotExpr.Skip (2).DefaultIfEmpty (string.Empty).Reverse ()),
+                           TypeName = dotExpr.Skip (1).First (),
+                           MemberName = dotExpr.First (),
+                           GenericMemberArguments = $2 as List<EcmaDesc>,
+                           MemberArguments = SafeReverse ($3 as List<EcmaDesc>)
+                      };
+                }
+        | type_expression EXPLICIT_IMPL_SEP method_expression {
+                      var desc = $1 as EcmaDesc;
+                      desc.ExplicitImplMember = $3 as EcmaDesc;
+                      $$ = desc;
+                }
+
+/* To be used with members that may have no type/namespace attached */
+reduced_member_expression
+        : IDENTIFIER opt_generic_type_suffix { $$ = (string)$1 + ($2 == null ? string.Empty : "<" + string.Join (",", ((IEnumerable<EcmaDesc>)$2).Select (t => t.ToCompleteTypeName ())) + ">"); }
+        | IDENTIFIER opt_generic_type_suffix DOT reduced_member_expression {
+                      var existing = $4 as string;
+                      var expr = (string)$1 + ($2 == null ? string.Empty : "<" + string.Join (",", ((IEnumerable<EcmaDesc>)$2).Select (t => t.ToCompleteTypeName ())) + ">");
+                      $$ = expr + "." + existing;
+                }
+
+arg_type_expression
+        : type_expression opt_arg_type_suffix { var desc = (EcmaDesc)$1; desc.DescModifier = (EcmaDesc.Mod)$2; $$ = desc; }
+
+opt_arg_type_suffix
+        : /* empty */ { $$ = EcmaDesc.Mod.Normal; }
+        | STAR { $$ = EcmaDesc.Mod.Pointer; }
+        | REF_ARG { $$ = EcmaDesc.Mod.Ref; }
+        | OUT_ARG { $$ = EcmaDesc.Mod.Out; }
+
+type_expression_list
+        : /* empty */ { $$ = null; }
+        | arg_type_expression { $$ = new List<EcmaDesc> () { (EcmaDesc)$1 }; }
+        | arg_type_expression COMMA type_expression_list { ((List<EcmaDesc>)$3).Add ((EcmaDesc)$1); $$ = $3; }
+
+simple_member_expression
+        : dot_expression {
+                 var dotExpr = ((List<string>)$1);
+                 dotExpr.Reverse ();
+
+                 $$ = new EcmaDesc {
+                      Namespace = dotExpr.Count > 2 ? string.Join (".", dotExpr.Take (dotExpr.Count - 2)) : string.Empty,
+                      TypeName = dotExpr.Count > 1 ?  dotExpr[dotExpr.Count - 2] : string.Empty,
+                      MemberName = dotExpr[dotExpr.Count - 1]
+                 };
+             }
+        | type_expression DOT IDENTIFIER {
+                 var desc = $1 as EcmaDesc;
+                 desc.MemberName = $3 as string;
+                 $$ = desc;
+             }
+        | type_expression EXPLICIT_IMPL_SEP simple_member_expression {
+                 var desc = $1 as EcmaDesc;
+                 desc.ExplicitImplMember = $3 as EcmaDesc;
+                 $$ = desc;
+             }
+
+constructor_expression
+        : method_expression { $$ = $1; }
+
+operator_expression
+        : method_expression { $$ = $1; }
+
+property_expression
+        : simple_member_expression opt_property_indexer {
+                 var desc = $1 as EcmaDesc;
+                 desc.MemberArguments = SafeReverse ($2 as List<EcmaDesc>);
+                 $$ = desc;
+             }
+
+opt_property_indexer
+        : opt_arg_list_suffix { $$ = $1; }
+
+/*simple_member_expression opt_arg_list_suffix { $$ = CopyFromEcmaDesc (new EcmaDesc {
+                           MemberArguments = SafeReverse ($2 as List<EcmaDesc>)
+                      }, (EcmaDesc)$1);
+                }*/
+
+opt_arg_list_suffix
+        : /* empty */ { $$ = null; }
+        | OP_OPEN_PAREN type_expression_list OP_CLOSE_PAREN { $$ = $2; }
+
+%%
+
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc.Ecma/EcmaUrlParserDriver.cs b/mcs/tools/monkeydoc/Monkeydoc.Ecma/EcmaUrlParserDriver.cs
new file mode 100644 (file)
index 0000000..93e8c35
--- /dev/null
@@ -0,0 +1,17 @@
+using System;
+using System.IO;
+
+namespace Monkeydoc.Ecma
+{
+       public class EcmaUrlParserDriver
+       {
+               public static void Main (string[] args)
+               {
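+			// Built by the Makefile's parser.exe helper target; e.g. run as:
+			//   mono parser.exe "T:System.String"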
+                       var input = new StringReader (args[0]);
+                       var lexer = new EcmaUrlTokenizer (input);
+                       var parser = new EcmaUrlParser ();
+
+                       Console.WriteLine (parser.yyparse (lexer));
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc.Ecma/EcmaUrlTokenizer.cs b/mcs/tools/monkeydoc/Monkeydoc.Ecma/EcmaUrlTokenizer.cs
new file mode 100644 (file)
index 0000000..20b798f
--- /dev/null
@@ -0,0 +1,144 @@
+using System;
+using System.IO;
+using System.Text;
+using System.Globalization;
+
+namespace Monkeydoc.Ecma
+{
+       public class EcmaUrlTokenizer : yyParser.yyInput
+       {
+               TextReader input;
+               object val;
+               int current_token;
+               int current_pos;
+               StringBuilder ident = new StringBuilder (20);
+
+               public EcmaUrlTokenizer (TextReader input)
+               {
+                       this.input = input;
+               }
+
+               static bool is_identifier_start_character (int c)
+               {
+                       return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || Char.IsLetter ((char)c);
+               }
+
+               static bool is_identifier_part_character (char c)
+               {
+                       if (c >= 'a' && c <= 'z')
+                               return true;
+
+                       if (c >= 'A' && c <= 'Z')
+                               return true;
+
+                       if (c == '_' || (c >= '0' && c <= '9'))
+                               return true;
+
+                       if (c < 0x80)
+                               return false;
+
+                       return Char.IsLetter (c) || Char.GetUnicodeCategory (c) == UnicodeCategory.ConnectorPunctuation;
+               }
+
+               public bool advance ()
+               {
+                       return input.Peek () != -1;
+               }
+
+               public Object Value {
+                       get {
+                               return val;
+                       }
+               }
+
+               public Object value ()
+               {
+                       return val;
+               }
+
+               public int token ()
+               {
+                       int token = xtoken ();
+                       //Console.WriteLine ("Current token {0} with value {1}", token, val == null ? "(none)" : val.ToString ());
+                       if (token == Token.ERROR)
+                               Console.WriteLine ("Problem at pos {0} after token {1}", current_pos, current_token);
+                       current_token = token;
+                       return token;
+               }
+
+               int xtoken ()
+               {
+                       char next = (char)input.Read ();
+                       while (char.IsWhiteSpace (next))
+                               next = (char)input.Read ();
+                       current_pos++;
+                       val = null;
+
+                       switch (next) {
+                       case ',':
+                               return Token.COMMA;
+                       case '.':
+                               return Token.DOT;
+                       case '<':
+                               return Token.OP_GENERICS_LT;
+                       case '>':
+                               return Token.OP_GENERICS_GT;
+                       case '`':
+                               return Token.OP_GENERICS_BACKTICK;
+                       case '(':
+                               return Token.OP_OPEN_PAREN;
+                       case ')':
+                               return Token.OP_CLOSE_PAREN;
+                       case '+':
+                               return Token.INNER_TYPE_SEPARATOR;
+                       case ':':
+                               return Token.COLON;
+                       case '/':
+                               return Token.SLASH_SEPARATOR;
+                       case '[':
+                               return Token.OP_ARRAY_OPEN;
+                       case ']':
+                               return Token.OP_ARRAY_CLOSE;
+                       case '*':
+                               return Token.STAR;
+                       case '&':
+                               return Token.REF_ARG;
+                       case '@':
+                               return Token.OUT_ARG;
+                       case '$':
+                               return Token.EXPLICIT_IMPL_SEP;
+                       default:
+                               return TokenizeIdentifierOrNumber (next);
+                       }
+               }
+
+               int TokenizeIdentifierOrNumber (char current)
+               {
+			// We must first return the expression kind, which is an uppercase letter followed by a colon
+                       if (current_pos < 2) {
+                               val = null;
+                               return (int)current;
+                       }
+
+                       if (is_identifier_start_character (current) || current == '*') {
+                               ident.Clear ();
+                               ident.Append (current);
+                               int peek;
+
+                               while ((peek = input.Peek ()) != -1 && is_identifier_part_character ((char)peek)) {
+                                       ident.Append ((char)input.Read ());
+                                       current_pos++;
+                               }
+
+                               val = ident.ToString ();
+                               return Token.IDENTIFIER;
+                       } else if (char.IsDigit (current)) {
+                               val = current - '0';
+                               return Token.DIGIT;
+                       } else {
+                               val = null;
+                               return Token.ERROR;
+                       }
+               }
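+		// e.g. tokenizing "T:System.String" yields the raw character 'T'
+		// (positions before 2 return the expression kind as-is), then COLON,
+		// IDENTIFIER("System"), DOT, IDENTIFIER("String").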
+       }
+}
diff --git a/mcs/tools/monkeydoc/Monkeydoc/HelpSource.cs b/mcs/tools/monkeydoc/Monkeydoc/HelpSource.cs
new file mode 100644 (file)
index 0000000..470d96c
--- /dev/null
@@ -0,0 +1,335 @@
+using System;
+using System.IO;
+using System.Linq;
+using System.Xml;
+using System.Diagnostics;
+using System.Collections.Generic;
+
+using Mono.Utilities;
+using Mono.Lucene.Net.Index;
+
+namespace MonkeyDoc
+{
+       //
+       // The HelpSource class keeps track of the archived data, and its
+       // tree
+       //
+       public class HelpSource
+       {
+               static int id;
+
+               //
+               // The unique ID for this HelpSource.
+               //
+               int source_id;
+
+		// The name of the HelpSource, shared by all the files (.tree, .zip, ...) it uses
+               string name;
+               // The full directory path where the HelpSource files are located
+               string basePath;
+
+               // The tree of this help source
+               Tree tree;
+               string treeFilePath;
+               RootTree rootTree;
+
+               IDocCache cache = new MonkeyDoc.Caches.FileCache (Path.Combine (Environment.GetFolderPath (Environment.SpecialFolder.ApplicationData), "monkeydoc", "cache"));
+               IDocStorage storage;
+
+               public HelpSource (string base_filename, bool create)
+               {
+                       this.name = Path.GetFileName (base_filename);
+                       this.basePath = Path.GetDirectoryName (base_filename);
+                       this.treeFilePath = base_filename + ".tree";
+                       this.storage = new MonkeyDoc.Storage.ZipStorage (base_filename + ".zip");
+
+                       tree = create ? new Tree (this, string.Empty, string.Empty) : new Tree (this, treeFilePath);
+
+                       source_id = id++;
+               }
+       
+               public HelpSource ()
+               {
+                       tree = new Tree (this, "Blah", "Blah");
+                       source_id = id++;
+               }
+       
+               public int SourceID {
+                       get {
+                               return source_id;
+                       }
+               }
+       
+               public string Name {
+                       get {
+                               return name;
+                       }
+               }
+
+               /* This gives the full path of the source/ directory */
+               public string BaseFilePath {
+                       get {
+                               return basePath;
+                       }
+               }
+
+               public TraceLevel TraceLevel {
+                       get;
+                       set;
+               }
+
+               public string BaseDir {
+                       get {
+                               return basePath;
+                       }
+               }
+
+               public Tree Tree {
+                       get {
+                               return tree;
+                       }
+               }
+
+               public RootTree RootTree {
+                       get {
+                               return rootTree;
+                       }
+                       internal set {
+                               rootTree = value;
+                       }
+               }
+
+               public IDocCache Cache {
+                       get {
+                               return cache;
+                       }
+               }
+
+               public IDocStorage Storage {
+                       get {
+                               return storage;
+                       }
+               }
+
+               // A HelpSource may have a common prefix for its URLs; give it here
+               protected virtual string UriPrefix {
+                       get {
+                               return "dummy";
+                       }
+               }
+       
+               /// <summary>
+               ///   Returns a stream from the packaged help source archive
+               /// </summary>
+               public virtual Stream GetHelpStream (string id)
+               {
+                       return storage.Retrieve (id);
+               }
+
+               public virtual Stream GetCachedHelpStream (string id)
+               {
+                       if (string.IsNullOrEmpty (id))
+                               throw new ArgumentNullException ("id");
+                       if (!cache.CanCache (DocEntity.Text))
+                               return GetHelpStream (id);
+                       if (!cache.IsCached (id))
+                               cache.CacheText (id, GetHelpStream (id));
+                       return cache.GetCachedStream (id);
+               }
+
+               public XmlReader GetHelpXml (string id)
+               {
+                       var url = "monodoc:///" + SourceID + "@" + Uri.EscapeDataString (id) + "@";
+                       var stream = cache.IsCached (id) ? cache.GetCachedStream (id) : storage.Retrieve (id);
+                       
+                       return stream == null ? null : new XmlTextReader (url, stream);
+               }
+       
+               public virtual XmlDocument GetHelpXmlWithChanges (string id)
+               {
+                       XmlDocument doc = new XmlDocument ();
+                       if (!storage.SupportRevision) {
+                               doc.Load (GetHelpXml (id));
+                       } else {
+                               var revManager = storage.RevisionManager;
+                               doc.Load (revManager.RetrieveLatestRevision (id));
+                       }
+                       return doc;
+               }
+
+               public virtual string GetCachedText (string id)
+               {
+                       if (!cache.CanCache (DocEntity.Text))
+                               return GetText (id);
+                       if (!cache.IsCached (id))
+                               cache.CacheText (id, GetText (id));
+                       return cache.GetCachedString (id);
+               }
+
+               public virtual string GetText (string id)
+               {
+                       return new StreamReader (GetHelpStream (id)).ReadToEnd ();
+               }
+
+               // Tells if the result for the provided id is generated dynamically
+               // by the help source
+               public virtual bool IsGeneratedContent (string id)
+               {
+                       return false;
+               }
+
+               // Tells if the content of the provided id is meant to be returned raw
+               public virtual bool IsRawContent (string id)
+               {
+                       return false;
+               }
+
+               // Tells whether the provided id refers to a multi-content-type document
+               // and, if so, returns the ids it is composed of
+               public virtual bool IsMultiPart (string id, out IEnumerable<string> parts)
+               {
+                       parts = null;
+                       return false;
+               }
+
+               /// <summary>
+               ///   Saves the tree and the archive
+               /// </summary>
+               public void Save ()
+               {
+                       tree.Save (treeFilePath);
+                       storage.Dispose ();
+               }
+       
+               public virtual void RenderPreviewDocs (XmlNode newNode, XmlWriter writer)
+               {
+                       throw new NotImplementedException ();
+               }
+
+               public virtual string GetPublicUrl (Node node)
+               {
+                       return node.GetInternalUrl ();
+               }
+
+               public virtual bool CanHandleUrl (string url)
+               {
+                       return url.StartsWith (UriPrefix);
+               }
+
+               public virtual string GetInternalIdForUrl (string url, out Node node)
+               {
+                       node = MatchNode (url);
+                       return node == null ? null : url.Substring (UriPrefix.Length);
+               }
+               
+               public virtual Node MatchNode (string url)
+               {
+                       Node current = null;
+
+                       var cache = LRUCache<string, Node>.Default;
+                       if ((current = cache.Get (url)) != null)
+                               return current;
+
+                       current = Tree.RootNode;
+                       var strippedUrl = url.StartsWith (UriPrefix) ? url.Substring (UriPrefix.Length) : url;
+                       var searchNode = new Node () { Element = strippedUrl };
+
+                       do {
+                               int index = current.Nodes.BinarySearch (searchNode, NodeElementComparer.Instance);
+                               if (index >= 0) {
+                                       Node n = current.Nodes[index];
+                                       //Console.WriteLine ("Binarysearch success for {0} which fell on {1}", strippedUrl, n.Element);
+                                       cache.Put (url, n);
+                                       return n;
+                               }
+                               index = ~index;
+                               if (index == current.Nodes.Count) {
+                                       //Console.WriteLine ("Match fail for {0}", strippedUrl);
+                                       //Console.WriteLine (current.Nodes.Select (n => n.Element).Aggregate ((e1, e2) => e1 + ", " + e2));
+                                       return SlowMatchNode (Tree.RootNode, cache, strippedUrl);
+                               }
+                               current = current.Nodes [index - 1];
+                               //Console.WriteLine ("Binarysearch failed for {0}, next node check is {1}", strippedUrl, current.Element);
+                       } while (true);
+
+                       return null;
+               }
+
+               /* This slow path is mainly here to handle ecmaspec-style urls, which contain numbers
+                * that are hard to sort because they don't all have the same number of digits. We could
+                * use a regex to harmonize the number parts, but that would be quite specific. Since the
+                * ecmaspec tree is well-formed enough, the "slow" match should still be fast enough.
+                */
+               Node SlowMatchNode (Node current, LRUCache<string, Node> cache, string url)
+               {
+                       //Console.WriteLine ("Entering slow path for {0} starting from {1}", url, current.Element);
+                       while (current != null) {
+                               bool stop = true;
+                               foreach (Node n in current.Nodes) {
+                                       var element = n.Element.StartsWith (UriPrefix) ? n.Element.Substring (UriPrefix.Length) : n.Element;
+                                       if (url == element) {
+                                               cache.Put (url, n);
+                                               return n;
+                                       } else if (url.StartsWith (element + ".") && !n.IsLeaf) {
+                                               current = n;
+                                               stop = false;
+                                               break;
+                                       }
+                               }
+                               if (stop)
+                                       current = null;
+                       }
+
+                       return null;
+               }
+               
+               class NodeElementComparer : IComparer<Node>
+               {
+                       public static NodeElementComparer Instance = new NodeElementComparer ();
+
+                       public int Compare (Node n1, Node n2)
+                       {
+                               return string.Compare (Cleanup (n1), Cleanup (n2), StringComparison.Ordinal);
+                       }
+
+                       string Cleanup (Node n)
+                       {
+                               var prefix = n.Tree != null && n.Tree.HelpSource != null ? n.Tree.HelpSource.UriPrefix : string.Empty;
+                               var element = n.Element.StartsWith (prefix) ? n.Element.Substring (prefix.Length) : n.Element;
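+                               // Zero-pad short leading numbers so that ordinal comparison
+                               // sorts them numerically, e.g. "9.5" -> "009.5" sorts before
+                               // "10.2" -> "010.2".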
+                               if (char.IsDigit (element, 0)) {
+                                       var count = element.TakeWhile (char.IsDigit).Count ();
+                                       element = element.PadLeft (Math.Max (0, 3 - count) + element.Length, '0');
+                               }
+                               //Console.WriteLine ("Cleaned up {0} to {1}", n.Element, element);
+                               return element;
+                       }
+               }
+
+               public virtual DocumentType GetDocumentTypeForId (string id, out Dictionary<string, string> extraParams)
+               {
+                       extraParams = null;
+                       return DocumentType.PlainText;
+               }
+
+               public virtual Stream GetImage (string url)
+               {
+                       return null;
+               }
+
+               //
+               // Populates the index.
+               //
+               public virtual void PopulateIndex (IndexMaker index_maker)
+               {
+               }
+
+               //
+               // Creates the different Documents to add to the Lucene search index.
+               // The default action is to do nothing; subclasses should add their docs.
+               //
+               public virtual void PopulateSearchableIndex (IndexWriter writer)
+               {
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc/Provider.cs b/mcs/tools/monkeydoc/Monkeydoc/Provider.cs
new file mode 100644 (file)
index 0000000..03c54e5
--- /dev/null
@@ -0,0 +1,27 @@
+using System;
+
+namespace MonkeyDoc
+{
+       public abstract class Provider
+       {
+               //
+               // This code is used to "tag" all the different sources
+               //
+               static short serial;
+
+               public int Code { get; set; }
+
+               public Provider ()
+               {
+                       Code = serial++;
+               }
+
+               public abstract void PopulateTree (Tree tree);
+
+               //
+               // Called at shutdown time after the tree has been populated to perform
+               // any fixups or final tasks.
+               //
+               public abstract void CloseTree (HelpSource hs, Tree tree);
+       }
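+
+       // A minimal sketch of a concrete provider (hypothetical, for illustration):
+       //
+       //     class StaticProvider : Provider
+       //     {
+       //             public override void PopulateTree (Tree tree)
+       //             {
+       //                     tree.RootNode.CreateNode ("My Docs", "static:index");
+       //             }
+       //
+       //             public override void CloseTree (HelpSource hs, Tree tree)
+       //             {
+       //             }
+       //     }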
+}
diff --git a/mcs/tools/monkeydoc/Monkeydoc/RootTree.cs b/mcs/tools/monkeydoc/Monkeydoc/RootTree.cs
new file mode 100644 (file)
index 0000000..cc17fb3
--- /dev/null
@@ -0,0 +1,479 @@
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.Collections.Specialized;
+using System.Configuration;
+using System.IO;
+using System.Linq;
+using System.Reflection;
+using System.Runtime.InteropServices;
+using System.Xml;
+
+using MonkeyDoc.Providers;
+using Mono.Lucene.Net.Analysis.Standard;
+using Mono.Lucene.Net.Index;
+
+namespace MonkeyDoc
+{
+       public class RootTree : Tree
+       {
+               public const int MonodocVersion = 2;
+               const string RootNamespace = "root:/";
+               string basedir;
+               List<string> uncompiledHelpSourcePaths = new List<string>();
+               HashSet<string> loadedSourceFiles = new HashSet<string>();
+               List<HelpSource> helpSources = new List<HelpSource>();
+               Dictionary<string, Node> nameToNode = new Dictionary<string, Node>();
+               Dictionary<string, HelpSource> nameToHelpSource = new Dictionary<string, HelpSource>();
+
+               public IList<HelpSource> HelpSources {
+                       get {
+                               return this.helpSources.AsReadOnly();
+                       }
+               }
+
+               public DateTime LastHelpSourceTime {
+                       get;
+                       set;
+               }
+
+               static bool IsUnix {
+                       get {
+                               int platform = (int)Environment.OSVersion.Platform;
+                               return platform == 4 || platform == 128 || platform == 6;
+                       }
+               }
+
+               RootTree () : base (null, "Mono Documentation", "root:")
+               {
+                       base.RootNode.EnsureNodes();
+                       this.LastHelpSourceTime = DateTime.Now;
+               }
+
+               public static RootTree LoadTree ()
+               {
+                       return RootTree.LoadTree (RootTree.ProbeBaseDirectories ());
+               }
+
+               static string ProbeBaseDirectories ()
+               {
+                       string result;
+                       try {
+                               NameValueCollection appSettings = ConfigurationManager.AppSettings;
+                               result = appSettings["docPath"];
+                       } catch {
+                               result = ".";
+                       }
+                       return result;
+               }
+
+               public static RootTree LoadTree (string basedir, bool includeExternal = true)
+               {
+                       if (string.IsNullOrEmpty (basedir))
+                               throw new ArgumentNullException ("basedir");
+                       if (!Directory.Exists (basedir))
+                               throw new ArgumentException (string.Format ("Base documentation directory at '{0}' doesn't exist", basedir), "basedir");
+
+                       XmlDocument xmlDocument = new XmlDocument ();
+                       string filename = Path.Combine (basedir, "monodoc.xml");
+                       xmlDocument.Load (filename);
+                       IEnumerable<string> sourceFiles = Directory.EnumerateFiles (Path.Combine (basedir, "sources"), "*.source");
+                       if (includeExternal)
+                               sourceFiles = sourceFiles.Concat (RootTree.ProbeExternalDirectorySources ());
+                       return RootTree.LoadTree (basedir, xmlDocument, sourceFiles);
+               }
+
+               static IEnumerable<string> ProbeExternalDirectorySources ()
+               {
+                       IEnumerable<string> enumerable = Enumerable.Empty<string> ();
+                       try {
+                               string path = ConfigurationManager.AppSettings["docExternalPath"];
+                               enumerable = enumerable.Concat (System.IO.Directory.EnumerateFiles (path, "*.source"));
+                       }
+                       catch {}
+
+                       if (Directory.Exists ("/Library/Frameworks/Mono.framework/External/monodoc"))
+                               enumerable = enumerable.Concat (Directory.EnumerateFiles ("/Library/Frameworks/Mono.framework/External/monodoc", "*.source"));
+                       return enumerable;
+               }
+
+               public static RootTree LoadTree (string indexDir, XmlDocument docTree, IEnumerable<string> sourceFiles)
+               {
+                       if (docTree == null) {
+                               docTree = new XmlDocument ();
+                               using (Stream manifestResourceStream = typeof (RootTree).Assembly.GetManifestResourceStream ("monodoc.xml")) {
+                                       docTree.Load (manifestResourceStream);
+                               }
+                       }
+
+                       sourceFiles = sourceFiles ?? new string[0];
+                       RootTree rootTree = new RootTree ();
+                       rootTree.basedir = indexDir;
+                       XmlNodeList xml_node_list = docTree.SelectNodes ("/node/node");
+                       rootTree.nameToNode["root"] = rootTree.RootNode;
+                       rootTree.nameToNode["libraries"] = rootTree.RootNode;
+                       rootTree.Populate (rootTree.RootNode, xml_node_list);
+
+                       if (rootTree.LookupEntryPoint ("various") == null)
+                               Console.Error.WriteLine ("No 'various' doc node! Check monodoc.xml!");
+
+                       foreach (string current in sourceFiles)
+                               rootTree.AddSourceFile (current);
+
+                       RootTree.PurgeNode (rootTree.RootNode);
+                       rootTree.RootNode.Sort ();
+                       return rootTree;
+               }
+
+               public void AddSource (string sourcesDir)
+               {
+                       IEnumerable<string> enumerable = Directory.EnumerateFiles (sourcesDir, "*.source");
+                       foreach (string current in enumerable)
+                               if (!this.AddSourceFile (current))
+                                       Console.Error.WriteLine ("Error: Could not load source file {0}", current);
+               }
+
+               public bool AddSourceFile (string sourceFile)
+               {
+                       if (this.loadedSourceFiles.Contains (sourceFile))
+                               return false;
+
+                       Node node = this.LookupEntryPoint ("various") ?? base.RootNode;
+                       XmlDocument xmlDocument = new XmlDocument ();
+                       try {
+                               xmlDocument.Load (sourceFile);
+                       } catch {
+                               return false;
+                       }
+
+                       XmlNodeList extra_nodes = xmlDocument.SelectNodes ("/monodoc/node");
+                       if (extra_nodes.Count > 0)
+                               this.Populate (node, extra_nodes);
+
+                       XmlNodeList sources = xmlDocument.SelectNodes ("/monodoc/source");
+                       if (sources == null) {
+                               Console.Error.WriteLine ("Error: No <source> section found in the {0} file", sourceFile);
+                               return false;
+                       }
+
+                       loadedSourceFiles.Add (sourceFile);
+                       foreach (XmlNode xmlNode in sources) {
+                               XmlAttribute a = xmlNode.Attributes["provider"];
+                               if (a == null) {
+                                       Console.Error.WriteLine ("Error: no provider in <source>");
+                                       continue;
+                               }
+                               string provider = a.InnerText;
+                               a = xmlNode.Attributes["basefile"];
+                               if (a == null) {
+                                       Console.Error.WriteLine ("Error: no basefile in <source>");
+                                       continue;
+                               }
+                               string basefile = a.InnerText;
+                               a = xmlNode.Attributes["path"];
+                               if (a == null) {
+                                       Console.Error.WriteLine ("Error: no path in <source>");
+                                       continue;
+                               }
+                               string path = a.InnerText;
+                               string basefilepath = Path.Combine (Path.GetDirectoryName (sourceFile), basefile);
+                               HelpSource helpSource = RootTree.GetHelpSource (provider, basefilepath);
+                               if (helpSource != null) {
+                                       helpSource.RootTree = this;
+                                       this.helpSources.Add (helpSource);
+                                       this.nameToHelpSource[path] = helpSource;
+                                       Node node2 = this.LookupEntryPoint (path);
+                                       if (node2 == null) {
+                                               Console.Error.WriteLine ("node `{0}' is not defined on the documentation map", path);
+                                               node2 = node;
+                                       }
+                                       foreach (Node current in helpSource.Tree.RootNode.Nodes) {
+                                               node2.AddNode (current);
+                                       }
+                                       node2.Sort ();
+                               }
+                       }
+                       return true;
+               }
+
+               static bool PurgeNode (Node node)
+               {
+                       bool result = false;
+                       if (!node.Documented) {
+                               List<Node> list = new List<Node> ();
+                               foreach (Node current in node.Nodes) {
+                                       if (RootTree.PurgeNode (current))
+                                               list.Add (current);
+                               }
+                               result = (node.Nodes.Count == list.Count);
+                               foreach (Node current2 in list)
+                                       node.DeleteNode (current2);
+                       }
+                       return result;
+               }
+
+               public static string[] GetSupportedFormats ()
+               {
+                       return new string[]
+                       {
+                               "ecma",
+                               "ecmaspec",
+                               "error",
+                               "man",
+                               "xhtml"
+                       };
+               }
+
+               public static HelpSource GetHelpSource (string provider, string basefilepath)
+               {
+                       HelpSource result;
+                       try {
+                               switch (provider) {
+                               case "xhtml":
+                               case "hb":
+                                       result = new XhtmlHelpSource (basefilepath, false);
+                                       break;
+                               case "man":
+                                       result = new ManHelpSource (basefilepath, false);
+                                       break;
+                               case "error":
+                                       result = new ErrorHelpSource (basefilepath, false);
+                                       break;
+                               case "ecmaspec":
+                                       result = new EcmaSpecHelpSource (basefilepath, false);
+                                       break;
+                               case "ecma":
+                                       result = new EcmaHelpSource (basefilepath, false);
+                                       break;
+                               default:
+                                       Console.Error.WriteLine ("Error: Unknown provider specified: {0}", provider);
+                                       result = null;
+                                       break;
+                               }
+                       } catch (FileNotFoundException) {
+                               Console.Error.WriteLine ("Error: did not find one of the files in sources/" + basefilepath);
+                               result = null;
+                       }
+                       return result;
+               }
+
+               public static Provider GetProvider (string provider, params string[] basefilepaths)
+               {
+                       switch (provider) {
+                       case "ecma":
+                               return new EcmaProvider (basefilepaths[0]);
+                       case "ecmaspec":
+                               return new EcmaSpecProvider (basefilepaths[0]);
+                       case "error":
+                               return new ErrorProvider (basefilepaths[0]);
+                       case "man":
+                               return new ManProvider (basefilepaths);
+                       case "xhtml":
+                       case "hb":
+                               return new XhtmlProvider (basefilepaths[0]);
+                       }
+
+                       throw new NotSupportedException (provider);
+               }
+
+               void Populate (Node parent, XmlNodeList xml_node_list)
+               {
+                       foreach (XmlNode xmlNode in xml_node_list) {
+                               XmlAttribute e = xmlNode.Attributes["parent"];
+                               Node parent2 = null;
+                               if (e != null && this.nameToNode.TryGetValue (e.InnerText, out parent2)) {
+                                       xmlNode.Attributes.Remove (e);
+                                       Populate (parent2, xmlNode.SelectNodes ("."));
+                                       continue;
+                               }
+                               e = xmlNode.Attributes["label"];
+                               if (e == null) {
+                                       Console.Error.WriteLine ("`label' attribute missing in <node>");
+                                       continue;
+                               }
+                               string label = e.InnerText;
+                               e = xmlNode.Attributes["name"];
+                               if (e == null) {
+                                       Console.Error.WriteLine ("`name' attribute missing in <node>");
+                                       continue;
+                               }
+                               string name = e.InnerText;
+                               Node orCreateNode = parent.GetOrCreateNode (label, "root:/" + name);
+                               orCreateNode.EnsureNodes ();
+                               this.nameToNode[name] = orCreateNode;
+                               XmlNodeList xmlNodeList = xmlNode.SelectNodes ("./node");
+                               if (xmlNodeList != null) {
+                                       this.Populate (orCreateNode, xmlNodeList);
+                               }
+                       }
+               }
+
+               public Node LookupEntryPoint (string name)
+               {
+                       Node result;
+                       this.nameToNode.TryGetValue (name, out result);
+                       return result;
+               }
+
+               public TOutput RenderUrl<TOutput> (string url, IDocGenerator<TOutput> generator, out Node node)
+               {
+                       node = null;
+                       string internalId = null;
+                       HelpSource hs = GetHelpSourceAndIdForUrl (url, out internalId, out node);
+                       return generator.Generate (hs, internalId);
+               }
+
+               public HelpSource GetHelpSourceAndIdForUrl (string url, out string internalId, out Node node)
+               {
+                       node = null;
+                       internalId = null;
+
+                       if (url.StartsWith ("root:/"))
+                               return this.GetHelpSourceAndIdFromName (url.Substring ("root:/".Length), out internalId, out node);
+
+                       HelpSource helpSource = null;
+                       foreach (var hs in helpSources.Where (h => h.CanHandleUrl (url))) {
+                               if (!string.IsNullOrEmpty (internalId = hs.GetInternalIdForUrl (url, out node))) {
+                                       helpSource = hs;
+                                       break;
+                               }
+                       }
+
+                       return helpSource;
+               }
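+
+               // Typical lookup flow (a sketch; the doc path and url are illustrative):
+               //
+               //   RootTree root = RootTree.LoadTree ("/usr/lib/monodoc");
+               //   Node match;
+               //   string id;
+               //   HelpSource hs = root.GetHelpSourceAndIdForUrl ("T:System.String", out id, out match);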
+
+               public HelpSource GetHelpSourceAndIdFromName (string name, out string internalId, out Node node)
+               {
+                       internalId = "root:";
+                       node = this.LookupEntryPoint (name);
+
+                       return node == null ? null : node.Nodes.Select (n => n.Tree.HelpSource).Where (hs => hs != null).Distinct ().FirstOrDefault ();
+               }
+
+               public HelpSource GetHelpSourceFromId (int id)
+               {
+                       return (id < 0 || id >= this.helpSources.Count) ? null : this.helpSources[id];
+               }
+
+               public Stream GetImage (string url)
+               {
+                       if (url.StartsWith ("source-id:")) {
+                               string text = url.Substring (10);
+                               int num = text.IndexOf (":");
+                               string text2 = text.Substring (0, num);
+                               int id = 0;
+                               try {
+                                       id = int.Parse (text2);
+                               } catch {
+                                       Console.Error.WriteLine ("Failed to parse source-id url: {0} `{1}'", url, text2);
+                                       return null;
+                               }
+                               HelpSource helpSourceFromId = this.GetHelpSourceFromId (id);
+                               return helpSourceFromId.GetImage (text.Substring (num + 1));
+                       }
+                       Assembly assembly = Assembly.GetAssembly (typeof (RootTree));
+                       return assembly.GetManifestResourceStream (url);
+               }
+
+               public IndexReader GetIndex ()
+               {
+                       string text = Path.Combine (this.basedir, "monodoc.index");
+                       if (File.Exists (text))
+                       {
+                               return IndexReader.Load (text);
+                       }
+                       text = Path.Combine (ConfigurationManager.AppSettings["monodocIndexDirectory"], "monodoc.index");
+                       return IndexReader.Load (text);
+               }
+
+               public static void MakeIndex ()
+               {
+                       RootTree rootTree = RootTree.LoadTree ();
+                       rootTree.GenerateIndex ();
+               }
+
+               public void GenerateIndex ()
+               {
+                       IndexMaker indexMaker = new IndexMaker ();
+                       foreach (HelpSource current in this.helpSources)
+                               current.PopulateIndex (indexMaker);
+                       string text = Path.Combine (this.basedir, "monodoc.index");
+                       try {
+                               indexMaker.Save (text);
+                       } catch (UnauthorizedAccessException) {
+                               text = Path.Combine (ConfigurationManager.AppSettings["docDir"], "monodoc.index");
+                               try {
+                                       indexMaker.Save (text);
+                               } catch (UnauthorizedAccessException) {
+                                       Console.WriteLine ("Unable to write index file in {0}", Path.Combine (ConfigurationManager.AppSettings["docDir"], "monodoc.index"));
+                                       return;
+                               }
+                       }
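+                       // 420 decimal == 0644 octal (rw-r--r--)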
+                       if (RootTree.IsUnix)
+                               RootTree.chmod (text, 420);
+
+                       Console.WriteLine ("Documentation index at {0} updated", text);
+               }
+
+               public SearchableIndex GetSearchIndex ()
+               {
+                       string text = Path.Combine (this.basedir, "search_index");
+                       if (System.IO.Directory.Exists (text)) {
+                               return SearchableIndex.Load (text);
+                       }
+                       text = Path.Combine (ConfigurationManager.AppSettings["docDir"], "search_index");
+                       return SearchableIndex.Load (text);
+               }
+
+               public static void MakeSearchIndex ()
+               {
+                       RootTree rootTree = RootTree.LoadTree ();
+                       rootTree.GenerateSearchIndex ();
+               }
+
+               public void GenerateSearchIndex ()
+               {
+                       Console.WriteLine ("Loading the monodoc tree...");
+                       string text = Path.Combine (this.basedir, "search_index");
+                       IndexWriter indexWriter;
+                       try {
+                               if (!Directory.Exists (text))
+                                       Directory.CreateDirectory (text);
+                               indexWriter = new IndexWriter (Mono.Lucene.Net.Store.FSDirectory.GetDirectory (text, true), new StandardAnalyzer (), true);
+                       } catch (UnauthorizedAccessException) {
+                               try {
+                                       text = Path.Combine (ConfigurationManager.AppSettings["docDir"], "search_index");
+                                       if (!Directory.Exists (text))
+                                               Directory.CreateDirectory (text);
+                                       indexWriter = new IndexWriter (Mono.Lucene.Net.Store.FSDirectory.GetDirectory (text, true), new StandardAnalyzer (), true);
+                               } catch (UnauthorizedAccessException) {
+                                       Console.WriteLine ("You don't have permissions to write on " + text);
+                                       return;
+                               }
+                       }
+                       Console.WriteLine ("Collecting and adding documents...");
+                       foreach (HelpSource current in this.helpSources) {
+                               current.PopulateSearchableIndex (indexWriter);
+                       }
+                       Console.WriteLine ("Closing...");
+                       indexWriter.Optimize ();
+                       indexWriter.Close ();
+               }
+
+               [DllImport ("libc")]
+               static extern int chmod (string filename, int mode);
+       }
+}
diff --git a/mcs/tools/monkeydoc/Monkeydoc/SearchableDocument.cs b/mcs/tools/monkeydoc/Monkeydoc/SearchableDocument.cs
new file mode 100644 (file)
index 0000000..d364de9
--- /dev/null
@@ -0,0 +1,43 @@
+//
+//
+// SearchableDocument.cs: Abstracts our model of document from the Lucene Document 
+//
+// Author: Mario Sopena
+//
+using Mono.Lucene.Net.Documents;
+
+namespace MonkeyDoc
+{
+       struct SearchableDocument
+       {
+               public string title;
+               public string url;
+               public string fulltitle;
+               public string hottext;
+               public string text;
+               public string examples;
+
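+               // Fields added with Store.YES/Index.NO (title, url, fulltitle) can be
+               // read back from a hit but are not searchable; fields added with
+               // Store.NO/Index.ANALYZED (hottext, text, examples) are searchable only.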
+               public Document LuceneDoc {
+                       get {
+                               Document doc = new Document ();
+                               doc.Add (UnIndexed ("title", title));
+                               doc.Add (UnIndexed ("url", url));
+                               doc.Add (UnIndexed ("fulltitle", fulltitle ?? string.Empty));
+                               doc.Add (UnStored ("hottext", hottext));
+                               doc.Add (UnStored ("text", text));
+                               doc.Add (UnStored ("examples", examples));
+                               return doc;
+                       }
+               }
+
+               static Field UnIndexed (string name, string value)
+               {
+                       return new Field (name, value, Field.Store.YES, Field.Index.NO);
+               }
+
+               static Field UnStored (string name, string value)
+               {
+                       return new Field (name, value, Field.Store.NO, Field.Index.ANALYZED);
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Monkeydoc/SearchableIndex.cs b/mcs/tools/monkeydoc/Monkeydoc/SearchableIndex.cs
new file mode 100644 (file)
index 0000000..69f51ed
--- /dev/null
@@ -0,0 +1,187 @@
+//
+//
+// SearchableIndex.cs: Index that uses Lucene to search through the docs 
+//
+// Author: Mario Sopena
+//
+
+using System;
+using System.IO;
+using System.Collections;
+// Lucene imports
+using Mono.Lucene.Net.Index;
+using Mono.Lucene.Net.Documents;
+using Mono.Lucene.Net.Analysis;
+using Mono.Lucene.Net.Analysis.Standard;
+using Mono.Lucene.Net.Search;
+using Mono.Lucene.Net.QueryParsers;
+
+namespace MonkeyDoc
+{
+       public class SearchableIndex 
+       {
+               const int maxSearchCount = 30;
+
+               IndexSearcher searcher;
+               string dir;
+               public string Dir {
+                       get { 
+                               if (dir == null) dir = "search_index";
+                               return dir;
+                       }
+                       set { dir = value; }
+               }
+               public ArrayList Results;
+       
+               public static SearchableIndex Load (string dir) {
+                       SearchableIndex s = new SearchableIndex ();
+                       s.dir = dir;
+                       s.Results = new ArrayList (20);
+                       try {
+                               s.searcher = new IndexSearcher (dir);
+                       } catch (IOException) {
+                               Console.WriteLine ("Index nonexistent or in bad format");
+                               return null;
+                       }
+                       return s;
+               }
+               
+               //
+               // Search the index with term
+               //
+
+               public Result Search (string term)
+               {
+                       return Search (term, maxSearchCount);
+               }
+
+               public Result Search (string term, int count)
+               {
+                       return Search (term, count, 0);
+               }
+
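+               // Builds a DisjunctionMaxQuery over "hottext" (fuzzy, plus exact and
+               // prefix matches boosted 10x), combined with "text" and "examples"
+               // term queries boosted 3x.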
+               public Result Search (string term, int count, int start) {
+                       try {
+                               term = term.ToLower ();
+                               Term htTerm = new Term ("hottext", term);
+                               Query qq1 = new FuzzyQuery (htTerm);
+                               Query qq2 = new TermQuery (htTerm);
+                               qq2.SetBoost (10f);
+                               Query qq3 = new PrefixQuery (htTerm);
+                               qq3.SetBoost (10f);
+                               DisjunctionMaxQuery q1 = new DisjunctionMaxQuery (0f);
+                               q1.Add (qq1);
+                               q1.Add (qq2);
+                               q1.Add (qq3);
+                               Query q2 = new TermQuery (new Term ("text", term));
+                               q2.SetBoost (3f);
+                               Query q3 = new TermQuery (new Term ("examples", term));
+                               q3.SetBoost (3f);
+                               DisjunctionMaxQuery q = new DisjunctionMaxQuery (0f);
+
+                               q.Add (q1);
+                               q.Add (q2);
+                               q.Add (q3);
+                       
+                               TopDocs top = SearchInternal (q, count, start);
+                               Result r = new Result (term, searcher, top.ScoreDocs);
+                               Results.Add (r);
+                               return r;
+                       } catch (IOException) {
+                               Console.WriteLine ("No index in {0}", dir);
+                               return null;
+                       }
+               }
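+
+               // Typical usage (a sketch; the index path is illustrative):
+               //
+               //   var index = SearchableIndex.Load ("/usr/lib/monodoc/search_index");
+               //   Result r = index.Search ("TcpClient");
+               //   for (int i = 0; i < r.Count; i++)
+               //           Console.WriteLine ("{0} {1}", r.GetTitle (i), r.GetUrl (i));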
+
+               TopDocs SearchInternal (Query q, int count, int start)
+               {
+                       // Easy path that doesn't involve creating a Collector ourselves;
+                       // watch for Lucene.NET improvements here (like searcher.SearchAfter)
+                       if (start == 0)
+                               return searcher.Search (q, count);
+
+                       var weight = searcher.CreateWeight (q); // TODO: reuse weight instead of query
+                       var collector = TopScoreDocCollector.create (start + count + 1, weight.ScoresDocsOutOfOrder());
+                       searcher.Search (q, collector);
+
+                       return collector.TopDocs (start, count);
+               }
+
+               public Result FastSearch (string term, int number)
+               {
+                       try {
+                               term = term.ToLower ();
+                               Query q1 = new TermQuery (new Term ("hottext", term));
+                               Query q2 = new PrefixQuery (new Term ("hottext", term));
+                               q2.SetBoost (0.5f);
+                               DisjunctionMaxQuery q = new DisjunctionMaxQuery (0f);
+                               q.Add (q1);
+                               q.Add (q2);
+                               TopDocs top = searcher.Search (q, number);
+                               return new Result (term, searcher, top.ScoreDocs);
+                       } catch (IOException) {
+                               Console.WriteLine ("No index in {0}", dir);
+                               return null;
+                       }
+               }
+       
+               Query Parse (string term, string field, bool fuzzy)
+               {
+                       QueryParser parser = new QueryParser (Mono.Lucene.Net.Util.Version.LUCENE_CURRENT,
+                                                             field,
+                                                             new StandardAnalyzer (Mono.Lucene.Net.Util.Version.LUCENE_CURRENT));
+                       return parser.Parse (term);
+               }
+       }
+       //
+       // An object representing the search term with the results
+       // 
+       public class Result {
+               string term;
+               Searcher searcher;
+               ScoreDoc[] docs;
+
+               public string Term {
+                       get { return term;}
+               }
+
+               public int Count {
+                       get { return docs.Length; }
+               }
+
+               public Document this [int i] {
+                       get { return searcher.Doc (docs[i].doc); }
+               }
+       
+               public string GetTitle (int i) 
+               {
+                       Document d = this[i];
+                       return d == null ? string.Empty : d.Get ("title");
+               }
+
+               public string GetUrl (int i)
+               {
+                       Document d = this[i];
+                       return d == null ? string.Empty : d.Get ("url");
+               }
+
+               public string GetFullTitle (int i)
+               {
+                       Document d = this[i];
+                       return d == null ? string.Empty : d.Get ("fulltitle");
+               }
+
+               public float Score (int i)
+               {
+                       return docs[i].score;
+               }
+
+               public Result (string term, Searcher searcher, ScoreDoc[] docs)
+               {
+                       this.term = term;
+                       this.searcher = searcher;
+                       this.docs = docs;
+               }
+       }
+}
+
diff --git a/mcs/tools/monkeydoc/Monkeydoc/Tree.cs b/mcs/tools/monkeydoc/Monkeydoc/Tree.cs
new file mode 100644 (file)
index 0000000..bc7e890
--- /dev/null
@@ -0,0 +1,473 @@
+using System;
+using System.IO;
+using System.Text;
+using System.Linq;
+using System.Xml;
+using System.Collections.Generic;
+
+namespace MonkeyDoc
+{
+       /// <summary>
+       ///    This tree is populated by the documentation providers, or populated
+       ///    from a binary encoding of the tree.  The format of the tree is designed
+       ///    to minimize the need to load it in full.
+       /// </summary>
+
+       /* Ideally this class should also be abstracted to let users have something
+        * other than a file as a backing store, a database for instance
+        */
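+       /* On-disk layout, as read and written below: bytes 0-3 hold the "MoHP"
+        * signature, bytes 4-7 the int32 address of the root node, and the
+        * serialized nodes follow from offset 8 onwards.
+        */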
+       public class Tree
+       {
+               public readonly HelpSource HelpSource;
+       
+               FileStream InputStream;
+               BinaryReader InputReader;
+
+               // This is the node which contains all the other nodes of the tree
+               Node rootNode;
+
+               /// <summary>
+               ///   Load from file constructor
+               /// </summary>
+               public Tree (HelpSource hs, string filename)
+               {
+                       Encoding utf8 = new UTF8Encoding (false, true);
+
+                       if (!File.Exists (filename)){
+                               throw new FileNotFoundException ();
+                       }
+               
+                       InputStream = File.OpenRead (filename);
+                       InputReader = new BinaryReader (InputStream, utf8);
+                       byte [] sig = InputReader.ReadBytes (4);
+               
+                       if (!GoodSig (sig))
+                               throw new Exception ("Invalid file format");
+               
+                       InputStream.Position = 4;
+                       var position = InputReader.ReadInt32 ();
+                       rootNode = new Node (this, position);
+                       InflateNode (rootNode);
+
+                       HelpSource = hs;
+               }
+
+               /// <summary>
+               ///    Tree creation and merged tree constructor
+               /// </summary>
+               public Tree (HelpSource hs, string caption, string url) : this (hs, null, caption, url)
+               {
+               }
+
+               public Tree (HelpSource hs, Node parent, string caption, string element)
+               {
+                       HelpSource = hs;
+                       rootNode = parent == null ? new Node (this, caption, element) : new Node (parent, caption, element);
+               }
+
+               /// <summary>
+               ///    Saves the tree into the specified file using the help file format.
+               /// </summary>
+               public void Save (string file)
+               {
+                       Encoding utf8 = new UTF8Encoding (false, true);
+                       using (FileStream output = File.OpenWrite (file)){
+                               // Skip over the pointer to the first node.
+                               output.Position = 8;
+                       
+                               using (BinaryWriter writer = new BinaryWriter (output, utf8)) {
+                                       // Recursively dump
+                                       rootNode.Serialize (output, writer);
+
+                                       output.Position = 0;
+                                       writer.Write (new byte [] { (byte) 'M', (byte) 'o', (byte) 'H', (byte) 'P' });
+                                       writer.Write (rootNode.Address);
+                               }
+                       }
+               }
+
+               public Node RootNode {
+                       get {
+                               return rootNode;
+                       }
+               }
+
+               static bool GoodSig (byte [] sig)
+               {
+                       if (sig.Length != 4)
+                               return false;
+                       return sig [0] == (byte) 'M'
+                               && sig [1] == (byte) 'o'
+                               && sig [2] == (byte) 'H'
+                               && sig [3] == (byte) 'P';
+               }
+
+               public void InflateNode (Node baseNode)
+               {
+                       var address = baseNode.Address;
+                       if (address < 0)
+                               address = -address;
+
+                       InputStream.Position = address;
+                       baseNode.Deserialize (InputReader);
+               }
+       }
+       
+       public class Node : IComparable<Node>, IComparable
+       {
+               readonly Tree tree;
+               string caption, element;
+               public bool Documented;
+               bool loaded;
+               Node parent;
+               List<Node> nodes;
+               Dictionary<string, Node> childrenLookup;
+               /* Address can hold three kinds of values:
+                *   0  - no on-disk representation
+                *   >0 - a valid address, loaded immediately
+                *   <0 - a valid address, negated, indicating lazy loading
+                */
+               int address;
+
+               public Node (Node parent, string caption, string element) : this (parent.Tree, caption, element)
+               {
+                       this.parent = parent;
+               }
+
+               internal Node (Tree tree, string caption, string element)
+               {
+                       this.tree = tree;
+                       this.caption = caption;
+                       this.element = element;
+               }
+       
+               /// <summary>
+               ///    Creates a node from an on-disk representation
+               /// </summary>
+               internal Node (Node parent, int address) : this (parent.tree, address)
+               {
+                       this.parent = parent;
+               }
+
+               internal Node (Tree tree, int address)
+               {
+                       this.address = address;
+                       this.tree = tree;
+                       if (address > 0)
+                               LoadNode ();
+               }
+
+               /* This is solely used for MatchNode to check for equality */
+               internal Node ()
+               {
+               }
+
+               void LoadNode ()
+               {
+                       tree.InflateNode (this);
+                       if (parent != null)
+                               parent.RegisterFullNode (this);
+               }
+
+               public void AddNode (Node n)
+               {
+                       nodes.Add (n);
+                       n.parent = this;
+                       n.Documented = true;
+                       RegisterFullNode (n);
+               }
+
+               public void DeleteNode (Node n)
+               {
+                       nodes.Remove (n);
+                       if (!string.IsNullOrEmpty (n.element))
+                               childrenLookup.Remove (n.element);
+               }
+
+               // When a child node is inflated, it calls this method
+               // so that we can add it to our lookup for quick search
+               void RegisterFullNode (Node child)
+               {
+                       if (childrenLookup == null)
+                               childrenLookup = new Dictionary<string, Node> ();
+                       if (!string.IsNullOrEmpty (child.element))
+                               childrenLookup[child.element] = child;
+               }
+
+               public List<Node> Nodes {
+                       get {
+                               EnsureLoaded ();
+                               return nodes != null ? nodes : new List<Node> ();
+                       }
+               }
+
+               public string Element {
+                       get {
+                               EnsureLoaded ();
+                               return element;
+                       }
+                       set {
+                               element = value;
+                       }
+               }
+
+               public string Caption {
+                       get {
+                               EnsureLoaded ();
+                               return caption;
+                       }
+                       internal set {
+                               caption = value;
+                       }
+               }
+       
+               public Node Parent {
+                       get {
+                               return parent;
+                       }
+               }
+
+               public Tree Tree {
+                       get {
+                               return tree;
+                       }
+               }
+
+               internal int Address {
+                       get {
+                               return address;
+                       }
+               }
+       
+               /// <summary>
+               ///   Creates a new child node with the locator entry point
+               ///   @c_element and the user-visible caption @c_caption
+               /// </summary>
+               public Node CreateNode (string c_caption, string c_element)
+               {
+                       EnsureNodes ();
+
+                       Node t = new Node (this, c_caption, c_element);
+                       nodes.Add (t);
+                       childrenLookup[c_element] = t;
+
+                       return t;
+               }
+
+               public Node GetOrCreateNode (string c_caption, string c_element)
+               {
+                       if (nodes == null)
+                               return CreateNode (c_caption, c_element);
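+                       // Resync the lookup table in case nodes were added without
+                       // going through CreateNode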
+                       if (childrenLookup.Count != nodes.Count || (nodes.Count == 0 && childrenLookup.Count != nodes.Capacity))
+                               UpdateLookup ();
+
+                       Node result;
+                       if (!childrenLookup.TryGetValue (c_element, out result))
+                               result = CreateNode (c_caption, c_element);
+                       return result;
+               }
+
+               public void EnsureNodes ()
+               {
+                       if (nodes == null) {
+                               nodes = new List<Node> ();
+                               childrenLookup = new Dictionary<string, Node> ();
+                       }
+               }
+
+               public void EnsureLoaded ()
+               {
+                       if (address < 0 && !loaded) {
+                               LoadNode ();
+                               loaded = true;
+                       }
+               }
+
+               void UpdateLookup ()
+               {
+                       foreach (var node in nodes)
+                               childrenLookup[node.Element] = node;
+               }
+       
+               public bool IsLeaf {
+                       get {
+                               return nodes == null || nodes.Count == 0;
+                       }
+               }
+
+               void EncodeInt (BinaryWriter writer, int value)
+               {
+                       do {
+                               int high = (value >> 7) & 0x01ffffff;
+                               byte b = (byte)(value & 0x7f);
+
+                               if (high != 0) {
+                                       b = (byte)(b | 0x80);
+                               }
+                       
+                               writer.Write(b);
+                               value = high;
+                       } while(value != 0);
+               }
+
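+               // Example: EncodeInt writes the value 300 as the two bytes 0xAC 0x02
+               // (7-bit groups, least significant first, high bit set on every byte
+               // but the last); DecodeInt reads them back to 300.
+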
+               int DecodeInt (BinaryReader reader)
+               {
+                       int ret = 0;
+                       int shift = 0;
+                       byte b;
+               
+                       do {
+                               b = reader.ReadByte();
+
+                               ret = ret | ((b & 0x7f) << shift);
+                               shift += 7;
+                       } while ((b & 0x80) == 0x80);
+                       
+                       return ret;
+               }
+
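+               // On-disk record layout: [varint child count][element][caption]
+               // [varint child addresses...]; strings use BinaryWriter's
+               // length-prefixed encoding.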
+               internal void Deserialize (BinaryReader reader)
+               {
+                       int count = DecodeInt (reader);
+                       element = reader.ReadString ();
+                       caption = reader.ReadString ();
+
+                       if (count == 0)
+                               return;
+               
+                       nodes = new List<Node> (count);
+                       for (int i = 0; i < count; i++) {
+                               int child_address = DecodeInt (reader);
+                                                             
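+                               // A negated address marks the child as not yet inflated;
+                               // EnsureLoaded will inflate it on first access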
+                               Node t = new Node (this, -child_address);
+                               nodes.Add (t);
+                       }
+               }
+
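+               // Children are serialized first (post-order) so that their final
+               // stream offsets are known when the parent record is written out.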
+               internal void Serialize (FileStream output, BinaryWriter writer)
+               {
+                       if (nodes != null)
+                               foreach (Node child in nodes)
+                                       child.Serialize (output, writer);
+
+                       address = (int) output.Position;
+                       EncodeInt (writer, nodes == null ? 0 : (int) nodes.Count);
+                       writer.Write (element);
+                       writer.Write (caption);
+
+                       if (nodes != null)
+                               foreach (Node child in nodes)
+                                       EncodeInt (writer, child.address);
+               }
+
+               public void Sort ()
+               {
+                       if (nodes != null)
+                               nodes.Sort ();
+               }
+
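+               // Builds the url by walking up the parent chain; an element that
+               // contains ':' is treated as an absolute root and returned as-is.
+               // E.g. (hypothetical) a node "Array" under a root "T:System" yields
+               // "T:System/Array".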
+               internal string GetInternalUrl ()
+               {
+                       EnsureLoaded ();
+                       if (element.IndexOf (":") != -1 || parent == null)
+                               return element;
+
+                       var parentUrl = parent.GetInternalUrl ();
+                       return parentUrl.EndsWith ("/") ? parentUrl + element : parentUrl + "/" + element;
+               }
+               
+               public string PublicUrl {
+                       get {
+                               var url = GetInternalUrl ();
+                               return tree.HelpSource != null ? tree.HelpSource.GetPublicUrl (this) : url;
+                       }
+               }
+
+               int IComparable.CompareTo (object obj)
+               {
+                       Node other = obj as Node;
+                       if (other == null)
+                               return -1;
+                       return CompareToInternal (other);
+               }
+
+               int IComparable<Node>.CompareTo (Node obj)
+               {
+                       return CompareToInternal (obj);
+               }
+
+               int CompareToInternal (Node other)
+               {
+                       EnsureLoaded ();
+                       other.EnsureLoaded ();
+
+                       var cap1 = caption;
+                       var cap2 = other.caption;
+
+                       /* Some nodes (notably from ecmaspec) have numbers prepended to
+                        * their captions; to sort those correctly we pad the shorter
+                        * number with leading zeros so both have the same digit count
+                        */
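+                       // e.g. captions "2. Foo" and "10. Bar": "2. Foo" is padded to
+                       // "02. Foo" so that the ordinal comparison sorts 2 before 10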
+                       if (cap1.Length > 0 && cap2.Length > 0 && char.IsDigit (cap1[0]) && char.IsDigit (cap2[0])) {
+                               int c1 = cap1.TakeWhile (char.IsDigit).Count ();
+                               int c2 = cap2.TakeWhile (char.IsDigit).Count ();
+                               
+                               if (c1 != c2) {
+                                       cap1 = cap1.PadLeft (cap1.Length + Math.Max (0, c2 - c1), '0');
+                                       cap2 = cap2.PadLeft (cap2.Length + Math.Max (0, c1 - c2), '0');
+                               }
+                       }
+
+                       return string.Compare (cap1, cap2, StringComparison.OrdinalIgnoreCase);
+               }
+       }
+
+       public static class TreeDumper
+       {
+               static int indent;
+
+               static void Indent ()
+               {
+                       for (int i = 0; i < indent; i++)
+                               Console.Write ("   ");
+               }
+       
+               public static void PrintTree (Node node)
+               {
+                       Indent ();
+                       Console.WriteLine ("{0},{1}\t[PublicUrl: {2}]", node.Element, node.Caption, node.PublicUrl);
+                       if (node.Nodes.Count == 0)
+                               return;
+
+                       indent++;
+                       foreach (Node n in node.Nodes)
+                               PrintTree (n);
+                       indent--;
+               }
+
+               public static string ExportToTocXml (Node root, string title, string desc)
+               {
+                       if (root == null)
+                               throw new ArgumentNullException ("root");
+                       // Return a toc index of sub-nodes
+                       StringBuilder buf = new StringBuilder ();
+                       var writer = XmlWriter.Create (buf);
+                       writer.WriteStartElement ("toc");
+                       writer.WriteAttributeString ("title", title ?? string.Empty);
+                       writer.WriteElementString ("description", desc ?? string.Empty);
+                       writer.WriteStartElement ("list");
+                       foreach (Node n in root.Nodes) {
+                               writer.WriteStartElement ("item");
+                               writer.WriteAttributeString ("url", n.Element);
+                               writer.WriteValue (n.Caption);
+                               writer.WriteEndElement ();
+                       }
+                       writer.WriteEndElement ();
+                       writer.WriteEndElement ();
+                       writer.Flush ();
+                       writer.Close ();
+
+                       return buf.ToString ();
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Monkeydoc/TypeUtils.cs b/mcs/tools/monkeydoc/Monkeydoc/TypeUtils.cs
new file mode 100644 (file)
index 0000000..2e75663
--- /dev/null
@@ -0,0 +1,40 @@
+using System;
+
+namespace MonkeyDoc
+{
+       public static class TypeUtils
+       {
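+               // Splits a documentation url of the form "Name.Space.Type" at the last
+               // dot that is not nested inside generic markers ('<'/'>' or '{'/'}'),
+               // e.g. (hypothetical) "System.Collections.Generic.List<T>" yields
+               // ns = "System.Collections.Generic" and type = "List<T>".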
+               public static bool GetNamespaceAndType (string url, out string ns, out string type)
+               {
+                       int nsidx = -1;
+                       int numLt = 0;
+                       for (int i = 0; i < url.Length; ++i) {
+                               char c = url [i];
+                               switch (c) {
+                               case '<':
+                               case '{':
+                                       ++numLt;
+                                       break;
+                               case '>':
+                               case '}':
+                                       --numLt;
+                                       break;
+                               case '.':
+                                       if (numLt == 0)
+                                               nsidx = i;
+                                       break;
+                               }
+                       }
+
+                       if (nsidx == -1) {
+                               ns = null;
+                               type = null;
+                               return false;
+                       }
+                       ns = url.Substring (0, nsidx);
+                       type = url.Substring (nsidx + 1);
+               
+                       return true;
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc/cache.cs b/mcs/tools/monkeydoc/Monkeydoc/cache.cs
new file mode 100644 (file)
index 0000000..2c089be
--- /dev/null
@@ -0,0 +1,26 @@
+using System;
+using System.IO;
+
+namespace MonkeyDoc
+{
+       public enum DocEntity
+       {
+               Text,
+               Blob
+       }
+
+       public interface IDocCache : IDisposable
+       {
+               bool IsCached (string id);
+               bool CanCache (DocEntity entity);
+
+               Stream GetCachedStream (string id);
+               string GetCachedString (string id);
+
+               void CacheText (string id, string content);
+               void CacheText (string id, Stream stream);
+
+               void CacheBlob (string id, byte[] data);
+               void CacheBlob (string id, Stream stream);
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc/caches/FileCache.cs b/mcs/tools/monkeydoc/Monkeydoc/caches/FileCache.cs
new file mode 100644 (file)
index 0000000..333f33b
--- /dev/null
@@ -0,0 +1,75 @@
+using System;
+using System.IO;
+
+namespace MonkeyDoc.Caches
+{
+       public class FileCache : IDocCache
+       {
+               string baseCacheDir;
+
+               public FileCache (string baseCacheDir)
+               {
+                       this.baseCacheDir = baseCacheDir;
+                       if (!Directory.Exists (baseCacheDir))
+                               Directory.CreateDirectory (baseCacheDir);
+               }
+
+               public bool IsCached (string id)
+               {
+                       return File.Exists (MakePath (id));
+               }
+
+               public bool CanCache (DocEntity entity)
+               {
+                       return true;
+               }
+
+               public Stream GetCachedStream (string id)
+               {
+                       return File.OpenRead (MakePath (id));
+               }
+
+               public string GetCachedString (string id)
+               {
+                       return File.ReadAllText (MakePath (id));
+               }
+
+               public void CacheText (string id, string content)
+               {
+                       File.WriteAllText (MakePath (id), content);
+               }
+
+               public void CacheText (string id, Stream stream)
+               {
+                       // File.Create truncates any previously cached entry
+                       using (var file = File.Create (MakePath (id)))
+                               stream.CopyTo (file);
+               }
+
+               public void CacheBlob (string id, byte[] data)
+               {
+                       File.WriteAllBytes (MakePath (id), data);
+               }
+
+               public void CacheBlob (string id, Stream stream)
+               {
+                       // File.Create truncates any previously cached entry
+                       using (var file = File.Create (MakePath (id)))
+                               stream.CopyTo (file);
+               }
+
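+               // Flattens the id into a single file name under baseCacheDir,
+               // e.g. (on Unix-like systems) "html/System.String" is stored as
+               // "html_System.String".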
+               string MakePath (string id)
+               {
+                       id = id.Replace (Path.DirectorySeparatorChar, '_');
+                       return Path.Combine (baseCacheDir, id);
+               }
+
+               public void Dispose ()
+               {
+                       if (!Directory.Exists (baseCacheDir))
+                               return;
+
+                       try {
+                               Directory.Delete (baseCacheDir, true);
+                       } catch {}
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc/caches/NullCache.cs b/mcs/tools/monkeydoc/Monkeydoc/caches/NullCache.cs
new file mode 100644 (file)
index 0000000..1514d66
--- /dev/null
@@ -0,0 +1,54 @@
+using System;
+using System.IO;
+
+namespace MonkeyDoc.Caches
+{
+       // This is basically a no-cache implementation
+       public class NullCache : IDocCache
+       {
+               public bool IsCached (string id)
+               {
+                       return false;
+               }
+
+               public bool CanCache (DocEntity entity)
+               {
+                       return false;
+               }
+
+               public Stream GetCachedStream (string id)
+               {
+                       return null;
+               }
+
+               public string GetCachedString (string id)
+               {
+                       return null;
+               }
+
+               public void CacheText (string id, string content)
+               {
+               }
+
+               public void CacheText (string id, Stream stream)
+               {
+               }
+
+               public void CacheBlob (string id, byte[] data)
+               {
+               }
+
+               public void CacheBlob (string id, Stream stream)
+               {
+               }
+
+               public void Dispose ()
+               {
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc/generator.cs b/mcs/tools/monkeydoc/Monkeydoc/generator.cs
new file mode 100644 (file)
index 0000000..f0949b9
--- /dev/null
@@ -0,0 +1,27 @@
+using System;
+
+namespace MonkeyDoc
+{
+       // All types of documents that a generator may receive as input
+       public enum DocumentType {
+               EcmaXml, // Our main monodoc format
+               EcmaSpecXml,
+               Man,
+               AddinXml,
+               MonoBook, // This is mostly XHTML already, it just needs a tiny bit of processing
+               Html,
+               TocXml, // Used by help sources to display a toc of the content they host
+               PlainText,
+               ErrorXml
+       }
+
+       /* This interface defines a transformation engine that converts
+        * multiple documentation source formats to a single output format
+        */
+       public interface IDocGenerator<TOutput>
+       {
+               // This method is responsible for finding out the documentation type
+               // of the given ID and for using the right engine internally
+               TOutput Generate (HelpSource hs, string internalId);
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc/generators/HtmlGenerator.cs b/mcs/tools/monkeydoc/Monkeydoc/generators/HtmlGenerator.cs
new file mode 100644 (file)
index 0000000..6cce5dc
--- /dev/null
@@ -0,0 +1,129 @@
+using System;
+using System.IO;
+using System.Text;
+using System.Linq;
+using System.Collections.Generic;
+
+using MonkeyDoc;
+
+namespace MonkeyDoc.Generators
+{
+       using Html;
+
+       interface IHtmlExporter
+       {
+               string CssCode { get; }
+               string Export (Stream input, Dictionary<string, string> extras);
+               string Export (string input, Dictionary<string, string> extras);
+       }
+
+       public class HtmlGenerator : IDocGenerator<string>
+       {
+               const string cachePrefix = "htmlcached#";
+
+               static string css_code;
+
+               IDocCache defaultCache;
+               static Dictionary<DocumentType, IHtmlExporter> converters;
+
+               static HtmlGenerator ()
+               {
+                       converters = new Dictionary<DocumentType, IHtmlExporter> {
+                               { DocumentType.Man, new Man2Html () },
+                               { DocumentType.TocXml, new Toc2Html () },
+                               { DocumentType.EcmaSpecXml, new Ecmaspec2Html () },
+                               { DocumentType.ErrorXml, new Error2Html () },
+                               { DocumentType.Html, new Idem () },
+                               { DocumentType.MonoBook, new MonoBook2Html () },
+                               { DocumentType.PlainText, new Idem () },
+                       };
+               }
+
+               public HtmlGenerator (IDocCache defaultCache)
+               {
+                       this.defaultCache = defaultCache;
+               }
+
+               public string Generate (HelpSource hs, string id)
+               {
+                       if (hs == null || string.IsNullOrEmpty (id))
+                               return MakeHtmlError ("No candidate provider was found for your request");
+                       var cache = defaultCache ?? hs.Cache;
+                       if (cache != null && cache.IsCached (MakeCacheKey (hs, id, null)))
+                               return cache.GetCachedString (MakeCacheKey (hs, id, null));
+
+                       IEnumerable<string> parts;
+                       if (hs.IsMultiPart (id, out parts))
+                               return GenerateMultiPart (hs, parts, id);
+
+                       if (hs.IsRawContent (id))
+                               return hs.GetText (id) ?? string.Empty;
+
+                       Dictionary<string, string> extraParams = null;
+                       DocumentType type = hs.GetDocumentTypeForId (id, out extraParams);
+                       if (cache != null && extraParams != null && cache.IsCached (MakeCacheKey (hs, id, extraParams)))
+                               return cache.GetCachedString (MakeCacheKey (hs, id, extraParams));
+
+                       IHtmlExporter exporter;
+                       if (!converters.TryGetValue (type, out exporter))
+                               return MakeHtmlError (string.Format ("Input type '{0}' not supported",
+                                                                    type.ToString ()));
+                       var result = hs.IsGeneratedContent (id) ? 
+                               exporter.Export (hs.GetCachedText (id), extraParams) :
+                               exporter.Export (hs.GetCachedHelpStream (id), extraParams);
+
+                       if (cache != null)
+                               cache.CacheText (MakeCacheKey (hs, id, extraParams), result);
+                       return result;
+               }
+
+               string GenerateMultiPart (HelpSource hs, IEnumerable<string> ids, string originalId)
+               {
+                       var sb = new StringBuilder ();
+                       foreach (var id in ids)
+                               sb.AppendLine (Generate (hs, id));
+
+                       var cache = defaultCache ?? hs.Cache;
+                       if (cache != null)
+                               cache.CacheText (MakeCacheKey (hs, originalId, null), sb.ToString ());
+                       return sb.ToString ();
+               }
+
+               public static string InlineCss {
+                       get {
+                               if (css_code != null)
+                                       return css_code;
+
+                               System.Reflection.Assembly assembly = System.Reflection.Assembly.GetAssembly (typeof (HtmlGenerator));
+                               Stream str_css = assembly.GetManifestResourceStream ("base.css");
+                               StringBuilder sb = new StringBuilder ((new StreamReader (str_css)).ReadToEnd());
+                               sb.Replace ("@@FONT_FAMILY@@", "Sans Serif");
+                               sb.Replace ("@@FONT_SIZE@@", "100%");
+                               css_code = sb.ToString () + converters.Values
+                                       .Select (c => c.CssCode)
+                                       .Where (css => !string.IsNullOrEmpty (css))
+                                       .DefaultIfEmpty (string.Empty)
+                                       .Aggregate (string.Concat);
+                               return css_code;
+                       }
+                       set { 
+                               css_code = value;
+                       }
+               }
+
+               string MakeHtmlError (string error)
+               {
+                       return string.Format ("<html><head></head><body><p>{0}</p></body></html>", error);
+               }
+
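+               // Cache keys look like "htmlcached#<SourceID><page>", with an optional
+               // "_key1value1-key2value2" suffix built from the extra parameters.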
+               string MakeCacheKey (HelpSource hs, string page, IDictionary<string,string> extraParams)
+               {
+                       var key = cachePrefix + hs.SourceID + page;
+                       if (extraParams != null && extraParams.Count > 0) {
+                               var paramPart = string.Join ("-", extraParams.Select (kvp => kvp.Key + kvp.Value));
+                               key += '_' + paramPart;
+                       }
+                       return key;
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc/generators/html/Ecma2Html.cs b/mcs/tools/monkeydoc/Monkeydoc/generators/html/Ecma2Html.cs
new file mode 100644 (file)
index 0000000..2d4188f
--- /dev/null
@@ -0,0 +1,310 @@
+using System;
+using System.IO;
+using System.Text;
+using System.Linq;
+using System.Xml;
+using System.Xml.Xsl;
+using System.Xml.XPath;
+using System.Collections.Generic;
+
+using Mono.Documentation;
+using BF = System.Reflection.BindingFlags;
+
+namespace MonkeyDoc.Generators.Html
+{
+       public class Ecma2Html : IHtmlExporter
+       {
+               static string css_ecma;
+               static string js;
+               static XslCompiledTransform ecma_transform;
+               // Per instance: AddExtensionObject throws if the same namespace is
+               // registered twice, so the argument list cannot be shared statically
+               XsltArgumentList args = new XsltArgumentList ();
+               readonly ExtensionObject ExtObject = new ExtensionObject ();
+
+               public Ecma2Html ()
+               {
+                       args.AddExtensionObject ("monodoc:///extensions", ExtObject);
+               }
+
+               public string CssCode {
+                       get {
+                               if (css_ecma != null)
+                                       return css_ecma;
+                               var assembly = typeof(Ecma2Html).Assembly;
+                               Stream str_css = assembly.GetManifestResourceStream ("mono-ecma.css");
+                               css_ecma = (new StreamReader (str_css)).ReadToEnd();
+                               return css_ecma;
+                       }
+               }
+
+               public string JsCode {
+                       get {
+                               if (js != null)
+                                       return js;
+                               var assembly = typeof(Ecma2Html).Assembly;
+                               Stream str_js = assembly.GetManifestResourceStream ("helper.js");
+                               js = (new StreamReader (str_js)).ReadToEnd();
+                               return js;
+                       }
+               }
+               
+               public string Htmlize (XmlReader ecma_xml)
+               {
+                       return Htmlize(ecma_xml, args);
+               }
+
+               public string Htmlize (XmlReader ecma_xml, XsltArgumentList args)
+               {
+                       EnsureTransform ();
+               
+                       var output = new StringBuilder ();
+                       ecma_transform.Transform (ecma_xml, 
+                                                 args, 
+                                                 XmlWriter.Create (output, ecma_transform.OutputSettings),
+                                                 CreateDocumentResolver ());
+                       return output.ToString ();
+               }
+
+               string GetViewMode (string url)
+               {
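+                       // Placeholder: view-mode extraction is not implemented yet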
+                       return "foo";
+               }
+               
+               protected virtual XmlResolver CreateDocumentResolver ()
+               {
+                       // results in using XmlUrlResolver
+                       return null;
+               }
+
+               public string Export (Stream stream, Dictionary<string, string> extraArgs)
+               {
+                       return Htmlize (XmlReader.Create (stream));
+               }
+
+               public string Export (string input, Dictionary<string, string> extraArgs)
+               {
+                       return Htmlize (XmlReader.Create (new StringReader (input)));
+               }
+               
+               static void EnsureTransform ()
+               {
+                       if (ecma_transform == null) {
+                               ecma_transform = new XslCompiledTransform ();
+                               var assembly = System.Reflection.Assembly.GetCallingAssembly ();
+                       
+                               Stream stream = assembly.GetManifestResourceStream ("mono-ecma-css.xsl");
+                               XmlReader xml_reader = new XmlTextReader (stream);
+                               XmlResolver r = new ManifestResourceResolver (".");
+                               ecma_transform.Load (xml_reader, XsltSettings.TrustedXslt, r);                  
+                       }
+               }
+
+               public class ExtensionObject
+               {
+                       bool quiet = true;
+
+                       public string Colorize(string code, string lang)
+                       {
+                               return Mono.Utilities.Colorizer.Colorize(code,lang);
+                       }
+
+                       // Used by the stylesheet to nicely reformat the <see cref="..."/> tags.
+                       public string MakeNiceSignature(string sig, string contexttype)
+                       {
+                               if (sig.Length < 3)
+                                       return sig;
+                               if (sig[1] != ':')
+                                       return sig;
+
+                               char s = sig[0];
+                               sig = sig.Substring(2);
+                       
+                               switch (s) {
+                               case 'N': return sig;
+                               case 'T': return ShortTypeName (sig, contexttype);
+
+                               case 'C': case 'M': case 'P': case 'F': case 'E':
+                                       string type, mem, arg;
+                                       
+                                       // Get arguments
+                                       int paren;
+                                       if (s == 'C' || s == 'M')
+                                               paren = sig.IndexOf("(");
+                                       else if (s == 'P')
+                                               paren = sig.IndexOf("[");
+                                       else
+                                               paren = 0;
+                                       
+                                       if (paren > 0 && paren < sig.Length-1) {
+                                               string[] args = sig.Substring(paren+1, sig.Length-paren-2).Split(',');                                          
+                                               for (int i = 0; i < args.Length; i++)
+                                                       args[i] = ShortTypeName(args[i], contexttype);
+                                               arg = "(" + String.Join(", ", args) + ")";
+                                               sig = sig.Substring(0, paren); 
+                                       } else {
+                                               arg = string.Empty;
+                                       }
+
+                                       // Get type and member names
+                                       int dot = sig.LastIndexOf(".");
+                                       if (s == 'C' || dot <= 0 || dot == sig.Length-1) {
+                                               mem = string.Empty;
+                                               type = sig;
+                                       } else {
+                                               type = sig.Substring(0, dot);
+                                               mem = sig.Substring(dot);
+                                       }
+                                               
+                                       type = ShortTypeName(type, contexttype);
+                                       
+                                       return type + mem + arg;
+
+                               default:
+                                       return sig;
+                               }
+                       }
+
+                       static string ShortTypeName(string name, string contexttype)
+                       {
+                               int dot = contexttype.LastIndexOf(".");
+                               if (dot < 0) return name;
+                               string contextns = contexttype.Substring(0, dot+1);
+
+                               if (name == contexttype)
+                                       return name.Substring(dot+1);
+                       
+                               if (name.StartsWith(contextns))
+                                       return name.Substring(contextns.Length);
+                       
+                               return name.Replace("+", ".");
+                       }
+
+                       string MonoImpInfo(string assemblyname, string typename, string membername, string arglist, bool strlong)
+                       {
+                               if (quiet)
+                                       return string.Empty;
+                               
+                               var a = new List<string> ();
+                               if (!string.IsNullOrEmpty (arglist)) a.Add (arglist);
+                               return MonoImpInfo(assemblyname, typename, membername, a, strlong);
+                       }
+
+                       string MonoImpInfo(string assemblyname, string typename, string membername, XPathNodeIterator itr, bool strlong)
+                       {
+                               if (quiet)
+                                       return string.Empty;
+                               
+                               var rgs = itr.Cast<XPathNavigator> ().Select (nav => nav.Value).ToList ();
+                       
+                               return MonoImpInfo (assemblyname, typename, membername, rgs, strlong);
+                       }
+               
+                       string MonoImpInfo(string assemblyname, string typename, string membername, List<string> arglist, bool strlong)
+                       {
+                               try {
+                                       System.Reflection.Assembly assembly = null;
+                               
+                                       try {
+                                               assembly = System.Reflection.Assembly.LoadWithPartialName(assemblyname);
+                                       } catch (Exception) {
+                                               // nothing.
+                                       }
+                               
+                                       if (assembly == null) {
+                                               /*if (strlong) return "The assembly " + assemblyname + " is not available to MonoDoc.";
+                                                 else return string.Empty;*/
+                                               return string.Empty; // silently ignore
+                                       }
+
+                                       Type t = assembly.GetType(typename, false);
+                                       if (t == null) {
+                                               if (strlong)
+                                                       return typename + " has not been implemented.";
+                                               else
+                                                       return "Not implemented.";
+                                       }
+
+                                       // The following code is flaky and fails to find existing members
+                                       return string.Empty;
+                               } catch (Exception) {
+                                       return string.Empty;
+                               }
+                       }
+               
+                       string MonoImpInfo(System.Reflection.MemberInfo mi, string itemtype, bool strlong)
+                       {
+                               if (quiet)
+                                       return string.Empty;
+                               
+                               string s = string.Empty;
+
+                               object[] atts = mi.GetCustomAttributes(true);
+                               int todoctr = 0;
+                               foreach (object att in atts) if (att.GetType().Name == "MonoTODOAttribute") todoctr++;
+
+                               if (todoctr > 0) {
+                                       if (strlong)
+                                               s = "This " + itemtype + " is marked as being unfinished.<BR/>\n";
+                                       else 
+                                               s = "Unfinished.";
+                               }
+
+                               return s;
+                       }
+
+                       public string MonoImpInfo(string assemblyname, string typename, bool strlong)
+                       {
+                               if (quiet)
+                                       return string.Empty;
+                               
+                               try {
+                                       if (assemblyname == string.Empty)
+                                               return string.Empty;
+
+                                       var assembly = System.Reflection.Assembly.LoadWithPartialName(assemblyname);
+                                       if (assembly == null)
+                                               return string.Empty;
+
+                                       Type t = assembly.GetType(typename, false);
+                                       if (t == null) {
+                                               if (strlong)
+                                                       return typename + " has not been implemented.";
+                                               else
+                                                       return "Not implemented.";
+                                       }
+
+                                       string s = MonoImpInfo(t, "type", strlong);
+
+                                       if (strlong) {
+                                               var mis = t.GetMembers (BF.Static | BF.Instance | BF.Public | BF.NonPublic);
+
+                                               // Scan members for MonoTODO attributes
+                                               int mctr = 0;
+                                               foreach (var mi in mis) {
+                                                       string mii = MonoImpInfo(mi, null, false);
+                                                       if (mii != string.Empty) mctr++; 
+                                               }
+                                               if (mctr > 0) {
+                                                       s += "This type has " + mctr + " members that are marked as unfinished.<BR/>";
+                                               }
+                                       }
+
+                                       return s;
+
+                               } catch (Exception) {
+                                       return string.Empty;
+                               }                       
+                       }
+
+                       public bool MonoEditing ()
+                       {
+                               return false;
+                       }
+               
+                       public bool IsToBeAdded(string text)
+                       {
+                               return text.StartsWith ("To be added");
+                       }
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc/generators/html/Ecmaspec2Html.cs b/mcs/tools/monkeydoc/Monkeydoc/generators/html/Ecmaspec2Html.cs
new file mode 100644 (file)
index 0000000..95f4064
--- /dev/null
@@ -0,0 +1,66 @@
+using System;
+using System.IO;
+using System.Xml;
+using System.Xml.Xsl;
+using System.Xml.XPath;
+using System.Collections.Generic;
+
+namespace MonkeyDoc.Generators.Html
+{
+       public class Ecmaspec2Html : IHtmlExporter
+       {
+               static string css_ecmaspec;
+               static XslTransform ecma_transform;
+               static XsltArgumentList args = new XsltArgumentList();
+
+               public string CssCode {
+                       get {
+                               if (css_ecmaspec != null)
+                                       return css_ecmaspec;
+                               System.Reflection.Assembly assembly = System.Reflection.Assembly.GetCallingAssembly ();
+                               Stream str_css = assembly.GetManifestResourceStream ("ecmaspec.css");
+                               css_ecmaspec = (new StreamReader (str_css)).ReadToEnd ();
+                               return css_ecmaspec;
+                       }
+               }
+
+               class ExtObj
+               {
+                       public string Colorize (string code, string lang)
+                       {
+                               return Mono.Utilities.Colorizer.Colorize (code, lang);
+                       }
+               }
+
+               public string Export (Stream stream, Dictionary<string, string> extraArgs)
+               {
+                       return Htmlize (new XPathDocument (stream));
+               }
+
+               public string Export (string input, Dictionary<string, string> extraArgs)
+               {
+                       return Htmlize (new XPathDocument (new StringReader (input)));
+               }
+
+               static string Htmlize (XPathDocument ecma_xml)
+               {
+                       if (ecma_transform == null){
+                               ecma_transform = new XslTransform ();
+                               System.Reflection.Assembly assembly = System.Reflection.Assembly.GetCallingAssembly ();
+                               Stream stream;
+                               stream = assembly.GetManifestResourceStream ("ecmaspec-html-css.xsl");
+
+                               XmlReader xml_reader = new XmlTextReader (stream);
+                               ecma_transform.Load (xml_reader, null, null);
+                               args.AddExtensionObject ("monodoc:///extensions", new ExtObj ()); 
+                       }
+               
+                       if (ecma_xml == null) return "";
+
+                       StringWriter output = new StringWriter ();
+                       ecma_transform.Transform (ecma_xml, args, output, null);
+               
+                       return output.ToString ();
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc/generators/html/Error2Html.cs b/mcs/tools/monkeydoc/Monkeydoc/generators/html/Error2Html.cs
new file mode 100644 (file)
index 0000000..167d1f5
--- /dev/null
@@ -0,0 +1,110 @@
+using System;
+using System.IO;
+using System.Linq;
+using System.Xml;
+using System.Xml.XPath;
+using System.Collections.Generic;
+
+namespace MonkeyDoc.Generators.Html
+{
+       public class Error2Html : IHtmlExporter
+       {
+               public string Export (string input, Dictionary<string, string> extraArgs)
+               {
+                       return Htmlize (new XPathDocument (new StringReader (input)));
+               }
+
+               public string Export (Stream input, Dictionary<string, string> extraArgs)
+               {
+                       return Htmlize (new XPathDocument (input));
+               }
+
+               public string CssCode {
+                       get {
+                               return @"
+                                        #error_ref { 
+                                           background: #debcb0; 
+                                           border: 2px solid #782609; 
+                                        }
+                                        div.summary {
+                                                font-size: 110%;
+                                                font-weight: bolder;
+                                        }
+                                        div.details {
+                                                font-size: 110%;
+                                                font-weight: bolder;
+                                        }
+                                        div.code_example {
+                                               background: #f5f5dd;
+                                               border: 1px solid black;
+                                               padding-left: 1em;
+                                               padding-bottom: 1em;
+                                               margin-top: 1em;
+                                               white-space: pre;
+                                               margin-bottom: 1em;
+                                        }
+                                        div.code_ex_title {
+                                               position: relative;
+                                               top: -1em;
+                                               left: 30%;
+                                               background: #cdcd82;
+                                               border: 1px solid black;
+                                               color: black;
+                                               font-size: 65%;
+                                               text-transform: uppercase;
+                                               width: 40%;
+                                               padding: 0.3em;
+                                               text-align: center;
+                                        }";
+                       }
+               }
+
+               public string Htmlize (IXPathNavigable doc)
+               {
+                       var navigator = doc.CreateNavigator ();
+                       var errorName = navigator.SelectSingleNode ("//ErrorDocumentation/ErrorName");
+                       var details = navigator.SelectSingleNode ("//ErrorDocumentation/Details");
+
+                       StringWriter sw = new StringWriter ();
+                       XmlWriter w = new XmlTextWriter (sw);
+                       
+                       WriteElementWithClass (w, "div", "header");
+                       w.WriteAttributeString ("id", "error_ref");
+                       WriteElementWithClass (w, "div", "subtitle", "Compiler Error Reference");
+                       WriteElementWithClass (w, "div", "title", "Error " + (errorName == null ? string.Empty : errorName.Value));
+                       w.WriteEndElement ();
+
+                       if (details != null) {
+                               WriteElementWithClass (w, "div", "summary", "Summary");
+
+                               var summary = details.SelectSingleNode ("/Summary");
+                               w.WriteValue (summary == null ? string.Empty : summary.Value);
+                               
+                               WriteElementWithClass (w, "div", "details", "Details");
+                               var de = details.SelectSingleNode ("/Details");
+                               w.WriteValue (de == null ? string.Empty : de.Value);
+                       }
+                       
+                       foreach (XPathNavigator xmp in navigator.Select ("//ErrorDocumentation/Examples/string")) {
+                               WriteElementWithClass (w, "div", "code_example");
+                               WriteElementWithClass (w, "div", "code_ex_title", "Example");
+                               w.WriteRaw (Mono.Utilities.Colorizer.Colorize (xmp.Value, "c#"));
+                               w.WriteEndElement ();
+                       }
+                       
+                       w.Close ();
+                       
+                       return sw.ToString ();
+               }
+
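+               // Writes <element class="cls">; when content is provided the element
+               // is closed immediately, otherwise it is left open for the caller to
+               // close with WriteEndElement.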
+               void WriteElementWithClass (XmlWriter w, string element, string cls, string content = null)
+               {
+                       w.WriteStartElement (element);
+                       w.WriteAttributeString ("class", cls);
+                       if (!string.IsNullOrEmpty (content)) {
+                               w.WriteValue (content);
+                               w.WriteEndElement ();
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Monkeydoc/generators/html/Idem.cs b/mcs/tools/monkeydoc/Monkeydoc/generators/html/Idem.cs
new file mode 100644 (file)
index 0000000..0a58b21
--- /dev/null
@@ -0,0 +1,34 @@
+using System;
+using System.IO;
+using System.Text;
+using System.Collections.Generic;
+
+using MonkeyDoc;
+using MonkeyDoc.Generators;
+
+namespace MonkeyDoc.Generators.Html
+{
+       // Input is expected to be already HTML so just return it
+       public class Idem : IHtmlExporter
+       {
+               public string CssCode {
+                       get {
+                               return string.Empty;
+                       }
+               }
+
+               public string Export (Stream input, Dictionary<string, string> extraArgs)
+               {
+                       if (input == null)
+                               return null;
+                       return new StreamReader (input).ReadToEnd ();
+               }
+
+               public string Export (string input, Dictionary<string, string> extraArgs)
+               {
+                       if (string.IsNullOrEmpty (input))
+                               return null;
+                       return input;
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc/generators/html/Man2Html.cs b/mcs/tools/monkeydoc/Monkeydoc/generators/html/Man2Html.cs
new file mode 100644 (file)
index 0000000..68ed5ed
--- /dev/null
@@ -0,0 +1,316 @@
+using System;
+using System.IO;
+using System.Text;
+using System.Collections.Generic;
+
+using MonkeyDoc;
+using MonkeyDoc.Generators;
+
+namespace MonkeyDoc.Generators.Html
+{
+       public class Man2Html : IHtmlExporter
+       {
+               public string CssCode {
+                       get {
+                               return string.Empty;
+                       }
+               }
+
+               public string Export (Stream input, Dictionary<string, string> extraArgs)
+               {
+                       if (input == null)
+                               return null;
+                       return GetTextFromReader (new StreamReader (input));
+               }
+
+               public string Export (string input, Dictionary<string, string> extraArgs)
+               {
+                       if (string.IsNullOrEmpty (input))
+                               return null;
+                       return GetTextFromReader (new StringReader (input));
+               }
+
+               public static string GetTextFromReader (TextReader file)
+               {
+                       string line;
+                       StateInfo s = new StateInfo ();
+
+                       while ((line = file.ReadLine ()) != null)
+                               ProcessLine (line, s);
+
+                       return s.output.ToString ();
+               }
+
+               enum ListState {
+                       None,
+                       Start,
+                       Title,
+               }
+
+               class StateInfo {
+                       public ListState ls;
+                       public Stack<string> tags = new Stack<string> ();
+                       public StringBuilder output = new StringBuilder ();
+               }
+
+               static void ProcessLine (string line, StateInfo s)
+               {
+                       string[] parts = SplitLine (line);
+                       switch (parts [0]) {
+                       case ".\\\"": // comments
+                       case ".de":   // define macro
+                       case ".if":   // if
+                       case ".ne":   // need vertical space
+                       case "..":    // end macro
+                               // ignore
+                               break;
+                       case ".I":
+                               s.output.Append ("<i>");
+                               Translate (parts, 1, s.output);
+                               s.output.Append ("</i>");
+                               break;
+                       case ".B":
+                               s.output.Append ("<b>");
+                               Translate (parts, 1, s.output);
+                               s.output.Append ("</b>");
+                               break;
+                       case ".br":
+                               Translate (parts, 1, s.output);
+                               s.output.Append ("<br />");
+                               break;
+                       case ".nf":
+                               Expect (s, "</p>");
+                               s.output.Append ("<pre>\n");
+                               s.tags.Push ("</pre>");
+                               break;
+                       case ".fi":
+                               Expect (s, "</pre>");
+                               break;
+                       case ".PP":
+                               Expect (s, "</p>", "</dd>", "</dl>");
+                               goto case ".Sp";
+                       case ".Sp":
+                               Expect (s, "</p>");
+                               s.output.Append ("<p>");
+                               Translate (parts, 1, s.output);
+                               s.tags.Push ("</p>");
+                               break;
+                       case ".RS":
+                               Expect (s, "</p>");
+                               s.output.Append ("<blockquote>");
+                               s.tags.Push ("</blockquote>");
+                               break;
+                       case ".RE":
+                               ClearUntil (s, "</blockquote>");
+                               break;
+                       case ".SH":
+                               ClearAll (s);
+                               s.output.Append ("<h2>");
+                               Translate (parts, 1, s.output);
+                               s.output.Append ("</h2>")
+                                       .Append ("<blockquote>");
+                               s.tags.Push ("</blockquote>");
+                               break;
+                       case ".SS":
+                               s.output.Append ("<h3>");
+                               Translate (parts, 1, s.output);
+                               s.output.Append ("</h3>");
+                               break;
+                       case ".TH": {
+                               ClearAll (s);
+                               string name = "", extra = "";
+                               if (parts.Length >= 4 && parts [2].Trim ().Length == 0) {
+                                       name = parts [1] + "(" + parts [3] + ")";
+                                       if (parts.Length > 4) {
+                                               int start = 4;
+                                               if (parts [start].Trim ().Length == 0)
+                                                       ++start;
+                                               extra = string.Join ("", parts, start, parts.Length-start);
+                                       }
+                               }
+                               else
+                                       name = string.Join ("", parts, 1, parts.Length-1);
+                               s.output.Append ("<table width=\"100%\" bgcolor=\"#b0c4da\">" + 
+                                                "<tr colspan=\"2\"><td>Manual Pages</td></tr>\n" +
+                                                "<tr><td><h3>");
+                               Translate (name, s.output);
+                               s.output.Append ("</h3></td><td align=\"right\">");
+                               Translate (extra, s.output);
+                               s.output.Append ("</td></tr></table>");
+                               break;
+                       }
+                       case ".TP":
+                               Expect (s, "</p>");
+                               if (s.tags.Count > 0 && s.tags.Peek ().ToString () != "</dd>") {
+                                       s.output.Append ("<dl>");
+                                       s.tags.Push ("</dl>");
+                               }
+                               else
+                                       Expect (s, "</dd>");
+                               s.output.Append ("<dt>");
+                               s.tags.Push ("</dt>");
+                               s.ls = ListState.Start;
+                               break;
+                       default:
+                               Translate (line, s.output);
+                               break;
+                       }
+                       if (s.ls == ListState.Start)
+                               s.ls = ListState.Title;
+                       else if (s.ls == ListState.Title) {
+                               Expect (s, "</dt>");
+                               s.output.Append ("<dd>");
+                               s.tags.Push ("</dd>");
+                               s.ls = ListState.None;
+                       }
+                       s.output.Append ("\n");
+               }
+
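+               // Splits a troff request line into the request name and its arguments.
+               // Runs of whitespace are preserved as separate pieces (handlers test for
+               // them with Trim ()) and quoted arguments form a single piece, e.g.:
+               //   SplitLine (".TH mono 1")       =>  { ".TH", "mono", " ", "1" }
+               //   SplitLine (".SH \"SEE ALSO\"") =>  { ".SH", "SEE ALSO" }
+               //   SplitLine ("plain text")       =>  { null, "plain text" }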
+               static string[] SplitLine (string line)
+               {
+                       if (line.Length > 1 && line [0] != '.')
+                               return new string[]{null, line};
+
+                       int i;
+                       for (i = 0; i < line.Length; ++i) {
+                               if (char.IsWhiteSpace (line, i))
+                                       break;
+                       }
+
+                       if (i == line.Length)
+                               return new string[]{line};
+
+                       var pieces = new List<string> ();
+                       pieces.Add (line.Substring (0, i));
+                       bool inQuotes = false;
+                       bool prevWs   = true;
+                       ++i;
+                       int start = i;
+                       for ( ; i < line.Length; ++i) {
+                               char c = line [i];
+                               if (inQuotes) {
+                                       if (c == '"') {
+                                               Add (pieces, line, start, i);
+                                               start = i+1;
+                                               inQuotes = false;
+                                       }
+                               }
+                               else {
+                                       if (prevWs && c == '"') {
+                                               Add (pieces, line, start, i);
+                                               start = i+1;
+                                               inQuotes = true;
+                                       }
+                                       else if (char.IsWhiteSpace (c)) {
+                                               if (!prevWs) {
+                                                       Add (pieces, line, start, i);
+                                                       start = i;
+                                               }
+                                               prevWs = true;
+                                       }
+                                       else {
+                                               if (prevWs) {
+                                                       Add (pieces, line, start, i);
+                                                       start = i;
+                                               }
+                                               prevWs = false;
+                                       }
+                               }
+                       }
+                       if (start > 0 && start != line.Length)
+                               pieces.Add (line.Substring (start, line.Length-start));
+                       return pieces.ToArray ();
+               }
+
+               static void Add (List<string> pieces, string line, int start, int end)
+               {
+                       if (start == end)
+                               return;
+                       pieces.Add (line.Substring (start, end-start));
+               }
+
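+               // Pops tags off the stack, emitting their closing text, for as long as
+               // the top of the stack is one of the expected tags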
+               static void Expect (StateInfo s, params string[] expected)
+               {
+                       while (s.tags.Count > 0 && 
+                              Array.IndexOf (expected, s.tags.Peek ().ToString ()) >= 0) {
+                               s.output.Append (s.tags.Pop ().ToString ());
+                       }
+               }
+
+               static void ClearUntil (StateInfo s, string required)
+               {
+                       string e = null;
+                       while (s.tags.Count > 0 && 
+                              (e = s.tags.Peek ().ToString ()) != required) {
+                               s.output.Append (s.tags.Pop ().ToString ());
+                       }
+                       // Pop and emit the required tag itself, if we found it
+                       if (e == required)
+                               s.output.Append (s.tags.Pop ().ToString ());
+               }
+
+               static void ClearAll (StateInfo s)
+               {
+                       while (s.tags.Count > 0)
+                               s.output.Append (s.tags.Pop ().ToString ());
+               }
+
+               static void Translate (string[] lines, int startIndex, StringBuilder output)
+               {
+                       while (startIndex < lines.Length)
+                               Translate (lines [startIndex++], output);
+               }
+
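+               // Handles basic troff font escapes and HTML metacharacters, e.g.:
+               //   "\fBmono\fR(1)"  =>  "<b>mono</b>(1)"
+               //   "a <b> & c"      =>  "a &lt;b&gt; &amp; c"
+               // \fI opens <i>, \fB opens <b>, and \fR/\fP emit the pending closing tag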
+               static void Translate (string line, StringBuilder output)
+               {
+                       string span = null;
+                       for (int i = 0; i < line.Length; ++i) {
+                               switch (line [i]) {
+                               case '\\': {
+                                       if ((i+2) < line.Length && line [i+1] == 'f') {
+                                               if (line [i+2] == 'I') {
+                                                       output.Append ("<i>");
+                                                       span = "</i>";
+                                               }
+                                               else if (line [i+2] == 'B') {
+                                                       output.Append ("<b>");
+                                                       span = "</b>";
+                                               }
+                                               else if (line [i+2] == 'R' || line [i+2] == 'P') {
+                                                       output.Append (span);
+                                               }
+                                               else
+                                                       goto default;
+                                               i += 2;
+                                       }
+                                       else if ((i+1) < line.Length) {
+                                               output.Append (line [i+1]);
+                                               ++i;
+                                       }
+                                       else
+                                               goto default;
+                                       break;
+                               }
+                               case '<':
+                                       output.Append ("&lt;");
+                                       break;
+                               case '>':
+                                       output.Append ("&gt;");
+                                       break;
+                               case '&':
+                                       output.Append ("&amp;");
+                                       break;
+                               default:
+                                       output.Append (line [i]);
+                                       break;
+                               }
+                       }
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc/generators/html/MonoBook2Html.cs b/mcs/tools/monkeydoc/Monkeydoc/generators/html/MonoBook2Html.cs
new file mode 100644 (file)
index 0000000..89a531c
--- /dev/null
@@ -0,0 +1,87 @@
+using System;
+using System.IO;
+using System.Text;
+using System.Xml;
+using System.Collections.Generic;
+
+using MonkeyDoc;
+using MonkeyDoc.Generators;
+
+namespace MonkeyDoc.Generators.Html
+{
+       // Input is expected to already be HTML, so just hand back the <body> contents
+       public class MonoBook2Html : IHtmlExporter
+       {
+               public string CssCode {
+                       get {
+                               return @"   h3 { 
+       font-size: 18px;
+       padding-bottom: 4pt;
+       border-bottom: 2px solid #dddddd;
+   }
+       
+   .api {
+     border: 1px solid;
+     padding: 10pt;
+     margin: 10pt;
+   } 
+
+   .api-entry { 
+       border-bottom: none;
+       font-size: 18px;
+   }
+
+   .prototype {
+     border: 1px solid;
+     background-color: #f2f2f2;
+     padding: 5pt;
+     margin-top: 5pt;
+     margin-bottom: 5pt;  
+   } 
+
+   .header {
+     border: 1px solid !important;
+     padding: 0 0 5pt 5pt !important;
+     margin: 10pt !important;
+     white-space: pre !important;
+       font-family: monospace !important;
+     font-weight: normal !important;
+     font-size: 1em !important;
+   }
+    
+   .code {
+     border: 1px solid;
+     padding: 0 0 5pt 5pt;
+     margin: 10pt;
+     white-space: pre;
+       font-family: monospace;
+   }
+";
+                       }
+               }
+
+               public string Export (Stream input, Dictionary<string, string> extraArgs)
+               {
+                       if (input == null)
+                               return null;
+                       return FromXmlReader (XmlReader.Create (input));
+               }
+
+               public string Export (string input, Dictionary<string, string> extraArgs)
+               {
+                       if (string.IsNullOrEmpty (input))
+                               return null;
+                       return FromXmlReader (XmlReader.Create (new StringReader (input)));
+               }
+
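+               // E.g. "<html><head>...</head><body>CONTENT</body></html>" yields
+               // "CONTENT"; input without a head/body pair yields null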
+               public string FromXmlReader (XmlReader reader)
+               {
+                       if (!reader.ReadToDescendant ("head"))
+                               return null;
+                       if (!reader.ReadToNextSibling ("body"))
+                               return null;
+
+                       return reader.ReadInnerXml ();
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc/generators/html/Toc2Html.cs b/mcs/tools/monkeydoc/Monkeydoc/generators/html/Toc2Html.cs
new file mode 100644 (file)
index 0000000..eef17c2
--- /dev/null
@@ -0,0 +1,44 @@
+using System;
+using System.IO;
+using System.Xml;
+using System.Xml.Xsl;
+using System.Xml.XPath;
+using System.Reflection;
+using System.Collections.Generic;
+
+namespace MonkeyDoc.Generators.Html
+{
+       public class Toc2Html : IHtmlExporter
+       {
+               XslTransform transform;
+
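+               // Loads the embedded "toc-html.xsl" stylesheet once; each Export call
+               // then transforms the XML table of contents into HTML with it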
+               public Toc2Html ()
+               {
+                       transform = new XslTransform ();
+                       var assembly = Assembly.GetCallingAssembly ();
+                       var stream = assembly.GetManifestResourceStream ("toc-html.xsl");
+                       XmlReader xml_reader = new XmlTextReader (stream);
+                       transform.Load (xml_reader, null, null);
+               }
+
+               public string Export (Stream input, Dictionary<string, string> extraArgs)
+               {
+                       var output = new StringWriter ();
+                       transform.Transform (new XPathDocument (input), null, output, null);
+                       return output.ToString ();
+               }
+
+               public string Export (string input, Dictionary<string, string> extraArgs)
+               {
+                       var output = new StringWriter ();
+                       transform.Transform (new XPathDocument (new StringReader (input)), null, output, null);
+                       return output.ToString ();
+               }
+
+               public string CssCode {
+                       get {
+                               return string.Empty;
+                       }
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc/index.cs b/mcs/tools/monkeydoc/Monkeydoc/index.cs
new file mode 100644 (file)
index 0000000..62e4e31
--- /dev/null
@@ -0,0 +1,360 @@
+//
+// index.cs: Handling of the index files
+//
+// Author:
+//   Miguel de Icaza (miguel@xamarin.com)
+//
+// (C) 2003 Ximian, Inc.
+// Copyright 2003-2011 Novell Inc
+// Copyright 2011 Xamarin Inc.
+//
+// Possible file format optimizations:
+//   * Do not use 4 bytes for each index entry, use 3 bytes
+//   * Find a way of compressing strings, there are plenty of duplicates:
+//     find common roots, and use an encoding that uses a root to compress data.
+//     "System", "System.Data", "System.Data class"
+//     0: PLAIN: "System"
+//     1: PLAIN: " class"
+//     2: LINK0 PLAIN ".Data"
+//     3: LINK0 LINK1
+//
+//     Maybe split everything at spaces and dots, and encode that:
+//     string-1-idx "System."
+//     string-2-idx "Data"
+//     2-items [ string-1-idx string-2-idx ]
+//
+//     Other variations are possible, like archiving "System" and "System."
+//     when we see "System.Data".
+//
+//
+
+using System;
+using System.IO;
+using System.Text;
+using System.Collections;
+
+namespace MonkeyDoc
+{
+
+       public class Topic  {
+               public readonly string Caption;
+               public readonly string SortKey;
+               public readonly string Url;
+
+               public Topic (string caption, string sort_key, string url)
+               {
+                       Caption = caption;
+                       SortKey = sort_key;
+                       Url = url;
+               }
+       }
+
+       public class IndexEntry {
+               public int Position;
+               public object topics;
+               public int Count;
+               
+               public void Add (Topic t)
+               {
+                       Count++;
+                       if (topics == null)
+                               topics = t;
+                       else {
+                               if (!(topics is ArrayList)){
+                                       Topic temp = (Topic) topics;
+
+                                       topics = new ArrayList ();
+                                       ((ArrayList)topics).Add (temp);
+                               }
+                               ((ArrayList)topics).Add (t);
+                       }
+               }
+
+               public Topic this [int idx] {
+                       get {
+                               if (topics is Topic){
+                                       if (idx == 0)
+                                               return (Topic) topics;
+                                       else
+                                               throw new Exception ("Out of range index");
+                               } else {
+                                       return (Topic) (((ArrayList)topics) [idx]);
+                               }
+                       }
+               }
+
+               //
+               // Constructor from a stream
+               //
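+               // On-disk record layout, as read below: int32 topic count,
+               // int32 caption offset, then one int32 url offset per topic;
+               // each offset points at a length-prefixed string in the file
+               //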
+               public IndexEntry (FileStream fs, BinaryReader reader, int position)
+               {
+                       Count = reader.ReadInt32 ();
+                       int caption_offset = reader.ReadInt32 ();
+                       string caption;
+               
+                       if (Count == 1){
+                               int url_offset = reader.ReadInt32 ();
+                               fs.Position = caption_offset;
+                               caption = reader.ReadString ();
+                               fs.Position = url_offset;
+                               string url = reader.ReadString ();
+                               topics = new Topic (caption, "", url);
+                       } else {
+                               ArrayList l = new ArrayList (Count);
+                               topics = l;
+                               int [] offsets = new int [Count];
+                               for (int i = 0; i < Count; i++){
+                                       offsets [i] = reader.ReadInt32 ();
+                               }
+                               fs.Position = caption_offset;
+                               caption = reader.ReadString ();
+                               for (int i = 0; i < Count; i++){
+                                       fs.Position = offsets [i];
+                                       string url = reader.ReadString ();
+                                       l.Add (new Topic (caption, "", url));
+                               }
+                       }
+               }
+
+               //      Topic ReadTopic (FileStream fs, BinaryReader reader, ref string caption)
+               //      {
+               //              int caption_offset = -1;
+               //              if (caption == null)
+               //                      caption_offset = reader.ReadInt32 ();
+               //              int url_offset = reader.ReadInt32 ();
+               //
+               //              if (caption == null){
+               //                      fs.Position = caption_offset;
+               //                      caption = reader.ReadString ();
+               //              }
+               //              fs.Position = url_offset;
+               //              string url = reader.ReadString ();
+               //
+               //              return new Topic (caption, "", url);
+               //      }
+       
+               //
+               // Regular constructor
+       
+               public IndexEntry ()
+               {
+               }
+
+               public void WriteTopics (IndexMaker maker, Stream stream, BinaryWriter writer)
+               {
+                       //
+                       // Convention: entries with the same SortKey should have the same Caption
+                       //
+                       Position = (int) stream.Position;
+                       writer.Write (Count);
+
+                       if (topics is ArrayList){
+                               bool first = true;
+                               foreach (Topic t in (ArrayList) topics){
+                                       if (first){
+                                               writer.Write (maker.GetCode (t.Caption));
+                                               first = false;
+                                       }
+                                       writer.Write (maker.GetCode (t.Url));
+                               }
+                       } else {
+                               Topic t = (Topic) topics;
+
+                               writer.Write (maker.GetCode (t.Caption));
+                               writer.Write (maker.GetCode (t.Url));
+                       }
+               }
+       }
+
+       public class IndexMaker {
+               Hashtable entries = new Hashtable ();
+               Hashtable all_strings = new Hashtable ();
+
+               void add_string (string s)
+               {
+                       if (all_strings.Contains (s))
+                               return;
+                       all_strings [s] = 0;
+               }
+       
+               public void AddTopic (Topic topic)
+               {
+                       IndexEntry entry = (IndexEntry) entries [topic.SortKey];
+                       if (entry == null){
+                               entry = new IndexEntry ();
+                               entries [topic.SortKey] = entry;
+                       }
+
+                       add_string (topic.SortKey);
+                       add_string (topic.Caption);
+                       add_string (topic.Url);
+                       entry.Add (topic);
+               }
+
+               public void Add (string caption, string sort_key, string url)
+               {
+                       Topic t = new Topic (caption, sort_key, url);
+                       AddTopic (t);
+               }
+       
+               void SaveStringTable (Stream stream, BinaryWriter writer)
+               {
+                       ICollection k = all_strings.Keys;
+                       string [] ks = new string [k.Count];
+                       k.CopyTo (ks, 0);
+               
+                       foreach (string s in ks){
+                               int pos = (int) stream.Position;
+                               writer.Write (s);
+                               all_strings [s] = pos;
+                       }
+               }
+
+               public int GetCode (string s)
+               {
+                       return (int) all_strings [s];
+               }
+
+               int index_position;
+       
+               void SaveTopics (Stream stream, BinaryWriter writer)
+               {
+                       //
+                       // Convention: entries with the same SortKey should have the same Caption
+                       //
+                       foreach (IndexEntry e in entries.Values)
+                               e.WriteTopics (this, stream, writer);
+               }
+
+               void SaveIndexEntries (Stream stream, BinaryWriter writer)
+               {
+                       index_position = (int) stream.Position;
+                       writer.Write (entries.Count);
+                       ICollection keys = entries.Keys;
+                       string [] keys_name = new string [keys.Count];
+                       keys.CopyTo (keys_name, 0);
+                       Array.Sort (keys_name, new NameSort ());
+               
+                       foreach (string s in keys_name){
+                               IndexEntry e = (IndexEntry) entries [s];
+                               writer.Write (e.Position);
+                       }
+               }
+
+               class NameSort : IComparer {
+                       public int Compare (object a, object b)
+                       {
+                               string sa = (string) a;
+                               string sb = (string) b;
+
+                               return String.Compare (sa, sb, StringComparison.OrdinalIgnoreCase);
+                       }
+               }
+       
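+               // Overall file layout written below (and consumed by IndexReader):
+               //   bytes 0-3: the magic "Moix"
+               //   bytes 4-7: file offset of the index entry table
+               //   then the string table, the per-entry topic records and, last, the
+               //   entry table: int32 count plus one int32 record position per entry,
+               //   sorted case-insensitively by sort key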
+               public void Save (string filename)
+               {
+                       Encoding utf8 = new UTF8Encoding (false, true);
+
+                       using (FileStream fs = File.OpenWrite (filename)){
+                               BinaryWriter writer = 
+                                       new BinaryWriter (fs, utf8);
+                               writer.Write (new byte [] { (byte) 'M', 
+                                                           (byte) 'o', (byte) 'i', 
+                                                           (byte) 'x'});
+
+                               // Leave room for pointer
+                               fs.Position = 8;
+
+                               SaveStringTable (fs, writer);
+                               SaveTopics (fs, writer);
+
+                               // index_position is set here
+                       
+                               SaveIndexEntries (fs, writer);
+
+                               fs.Position = 4;
+                               writer.Write (index_position);
+                       }
+               }
+       }
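+
+       // A minimal round-trip sketch, with illustrative values:
+       //   var maker = new IndexMaker ();
+       //   maker.Add ("String Class", "string", "T:System.String");
+       //   maker.Save ("main.index");
+       //   var index = IndexReader.Load ("main.index");
+       //   for (int i = 0; i < index.Rows; i++)
+       //       Console.WriteLine (index.GetValue (i));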
+
+       public interface IListModel {
+               int Rows {get; }
+               string GetValue (int row);
+               string GetDescription (int row);
+       }
+
+       public class IndexReader : IListModel {
+               Encoding utf8 = new UTF8Encoding (false, true);
+               FileStream fs;
+               BinaryReader reader;
+
+               // The offset of the table of entries
+               int table_offset;
+               int entries;
+
+               static public IndexReader Load (string filename)
+               {
+                       if (!File.Exists (filename))
+                               return null;
+
+                       try {
+                               return new IndexReader (filename);
+                       } catch {
+                               return null;
+                       }
+               }
+       
+               IndexReader (string filename)
+               {
+                       fs = File.OpenRead (filename);
+                       reader = new BinaryReader (fs, utf8);
+
+                       if (fs.ReadByte () != 'M' ||
+                           fs.ReadByte () != 'o' ||
+                           fs.ReadByte () != 'i' ||
+                           fs.ReadByte () != 'x'){
+                               throw new Exception ("Corrupt index");
+                       }
+
+                       // Seek to index_entries
+                       fs.Position = reader.ReadInt32 ();
+               
+                       entries = reader.ReadInt32 ();
+
+                       table_offset = (int) fs.Position;
+               }
+
+               public int Rows {
+                       get {
+                               return entries;
+                       }
+               }
+
+               public string GetValue (int row)
+               {
+                       fs.Position = row * 4 + table_offset;
+                       fs.Position = reader.ReadInt32 () + 4;
+                       int code = reader.ReadInt32 ();
+                       fs.Position = code;
+                       string caption = reader.ReadString ();
+
+                       return caption;
+               }
+
+               public string GetDescription (int row)
+               {
+                       return GetValue (row);
+               }
+       
+               public IndexEntry GetIndexEntry (int row)
+               {
+                       fs.Position = row * 4 + table_offset;
+                       int entry_offset = reader.ReadInt32 ();
+                       fs.Position = entry_offset;
+               
+                       return new IndexEntry (fs, reader, entry_offset);
+               }
+       }
+}
+
diff --git a/mcs/tools/monkeydoc/Monkeydoc/providers/addins-provider.cs b/mcs/tools/monkeydoc/Monkeydoc/providers/addins-provider.cs
new file mode 100644 (file)
index 0000000..4db8503
--- /dev/null
@@ -0,0 +1,315 @@
+// addins-provider.cs
+//
+// A provider to display Mono.Addins extension models
+//
+// Author:
+//   Lluis Sanchez Gual <lluis@novell.com>
+//
+// Copyright (c) 2007 Novell, Inc (http://www.novell.com)
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+//
+
+using System;
+using System.Diagnostics;
+using System.Text;
+using System.IO;
+using System.Xml;
+
+namespace MonkeyDoc
+{ 
+
+       //
+       // The simple provider generates the information source
+       //
+       public class AddinsProvider : Provider
+       {
+               string file;
+               
+               public AddinsProvider (string xmlModelFile)
+               {
+                       file = xmlModelFile;
+                       
+                       if (!File.Exists (file))
+                               throw new FileNotFoundException (String.Format ("The file `{0}' does not exist", file));
+               }
+
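+               // Node URLs generated below follow three schemes:
+               //   addin:{fileId}#{addinId}
+               //   extension-point:{fileId}#{addinId}#{path}
+               //   extension-node:{fileId}#{addinId}#{nodeId}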
+               public override void PopulateTree (Tree tree)
+               {
+                       string fileId = tree.tree.HelpSource.PackFile (file);
+                       XmlDocument doc = new XmlDocument ();
+                       doc.Load (file);
+                       
+                       foreach (XmlElement addin in doc.SelectNodes ("Addins/Addin")) {
+
+                               string addinId = addin.GetAttribute ("fullId");
+                               Node newNode = tree.CreateNode (addin.GetAttribute ("name"), "addin:" + fileId + "#" + addinId);
+
+                               foreach (XmlElement node in addin.SelectNodes ("ExtensionPoint")) {
+                                       string target = "extension-point:" + fileId + "#" + addinId + "#" + node.GetAttribute ("path");
+                                       Node newExt = newNode.CreateNode (node.GetAttribute ("name"), target);
+                       
+                                       foreach (XmlElement en in node.SelectNodes ("ExtensionNode")) {
+                                               string nid = en.GetAttribute ("id");
+                                               string nname = en.GetAttribute ("name");
+                                               newExt.CreateNode (nname, "extension-node:" + fileId + "#" + addinId + "#" + nid);
+                                       }
+                               }
+                       }
+               }
+
+
+               public override void CloseTree (HelpSource hs, Tree tree)
+               {
+               }
+       }
+
+       //
+       // The HelpSource is used during the rendering phase.
+       //
+
+       public class AddinsHelpSource : HelpSource
+       {
+               public AddinsHelpSource (string base_file, bool create) : base (base_file, create) 
+               {
+               }
+               
+               protected const string AddinPrefix = "addin:";
+               protected const string ExtensionPrefix = "extension-point:";
+               protected const string ExtensionNodePrefix = "extension-node:";
+               
+               public override string GetText (string url, out Node match_node)
+               {
+                       match_node = null;
+
+                       string c = GetCachedText (url);
+                       if (c != null)
+                               return c;
+
+                       if (url.StartsWith (AddinPrefix))
+                               return GetAddinTextFromUrl (url);
+                       else if (url.StartsWith (ExtensionPrefix))
+                               return GetExtensionTextFromUrl (url);
+                       else if (url.StartsWith (ExtensionNodePrefix))
+                               return GetExtensionNodeTextFromUrl (url);
+
+                       return null;
+               }
+               
+               protected string GetAddinTextFromUrl (string url)
+               {
+                       // Remove "addin:" prefix including any help-source id on the front.
+                       url = url.Substring (AddinPrefix.Length);
+                       int i = url.IndexOf ('#');
+
+                       if (i == -1) {
+                               Message (TraceLevel.Warning, "Warning, invalid url!");
+                               return "<html>Invalid url</html>";
+                       }
+                       
+                       string fileId = url.Substring (0, i);
+                       string addinId = url.Substring (i+1);
+
+                       XmlElement addin = GetAddin (fileId, addinId);
+                       if (addin == null)
+                               return "<html>Add-in not found: " + addinId + "</html>";
+                       
+                       StringBuilder sb = new StringBuilder ("<html>");
+                       sb.Append ("<h1>").Append (addin.GetAttribute ("name")).Append ("</h1>");
+                       XmlElement docs = (XmlElement) addin.SelectSingleNode ("Description");
+                       if (docs != null)
+                               sb.Append (docs.InnerText);
+
+                       sb.Append ("<p><table border=\"1\" cellpadding=\"4\" cellspacing=\"0\">");
+                       sb.AppendFormat ("<tr><td><b>Id</b></td><td>{0}</td></tr>", addin.GetAttribute ("addinId"));
+                       sb.AppendFormat ("<tr><td><b>Namespace</b></td><td>{0}</td></tr>", addin.GetAttribute ("namespace"));
+                       sb.AppendFormat ("<tr><td><b>Version</b></td><td>{0}</td></tr>", addin.GetAttribute ("version"));
+                       sb.Append ("</table></p>");
+                       sb.Append ("<p><b>Extension Points</b>:</p>");
+                       sb.Append ("<ul>");
+                       
+                       foreach (XmlElement ep in addin.SelectNodes ("ExtensionPoint")) {
+                               sb.AppendFormat ("<li><a href=\"extension-point:{0}#{1}#{2}\">{3}</a></li>", fileId, addinId, ep.GetAttribute ("path"), ep.GetAttribute ("name"));
+                       }
+                       sb.Append ("</ul>");
+                       
+                       sb.Append ("</html>");
+                       return sb.ToString ();
+               }
+               
+               protected string GetExtensionTextFromUrl (string url)
+               {
+                       // Remove "extension-point:" prefix including any help-source id on the front.
+                       url = url.Substring (ExtensionPrefix.Length);
+                       int i = url.IndexOf ('#');
+
+                       if (i == -1) {
+                               Message (TraceLevel.Warning, "Warning, invalid url!");
+                               return "<html>Invalid url</html>";
+                       }
+                       
+                       string fileId = url.Substring (0, i);
+                       
+                       int j = url.IndexOf ('#', i+1);
+                       string addinId = url.Substring (i+1, j-i-1);
+                       string path = url.Substring (j+1);
+
+                       XmlElement addin = GetAddin (fileId, addinId);
+                       if (addin == null)
+                               return "<html>Add-in not found: " + addinId + "</html>";
+                       
+                       XmlElement ext = (XmlElement) addin.SelectSingleNode ("ExtensionPoint[@path='" + path + "']");
+                       if (ext == null)
+                               return "<html>Extension point not found: " + path + "</html>";
+                       
+                       StringBuilder sb = new StringBuilder ("<html>");
+                       sb.Append ("<h1>").Append (ext.GetAttribute ("name")).Append ("</h1>");
+
+                       path = path.Replace ("/", " <b>/</b> ");
+                       sb.Append ("<p><b>Path</b>: ").Append (path).Append ("</p>");
+                       XmlElement desc = (XmlElement) ext.SelectSingleNode ("Description");
+                       if (desc != null)
+                               sb.Append (desc.InnerText);
+
+                       sb.Append ("<p><b>Extension Nodes</b>:</p>");
+                       sb.Append ("<table border=\"1\" cellpadding=\"4\" cellspacing=\"0\">");
+                       
+                       foreach (XmlElement en in ext.SelectNodes ("ExtensionNode")) {
+                               string nid = en.GetAttribute ("id");
+                               string nname = en.GetAttribute ("name"); 
+                               string sdesc = "";
+                               desc = (XmlElement) en.SelectSingleNode ("Description");
+                               if (desc != null)
+                                       sdesc = desc.InnerText;
+                               
+                               sb.AppendFormat ("<tr><td><a href=\"extension-node:{0}#{1}#{2}\">{3}</a></td><td>{4}</td></tr>", fileId, addinId, nid, nname, sdesc);
+                       }
+                       sb.Append ("</table>");
+                       
+                       sb.Append ("</html>");
+                       return sb.ToString ();
+               }
+               
+               protected string GetExtensionNodeTextFromUrl (string url)
+               {
+                       // Remove "extension-node:" prefix including any help-source id on the front.
+                       url = url.Substring (ExtensionNodePrefix.Length);
+                       int i = url.IndexOf ('#');
+
+                       if (i == -1) {
+                               Message (TraceLevel.Warning, "Warning, invalid url!");
+                               return "<html>Invalid url</html>";
+                       }
+                       
+                       string fileId = url.Substring (0, i);
+                       
+                       int j = url.IndexOf ('#', i+1);
+                       string addinId = url.Substring (i+1, j-i-1);
+                       string nodeId = url.Substring (j+1);
+
+                       XmlElement addin = GetAddin (fileId, addinId);
+                       if (addin == null)
+                               return "<html>Add-in not found: " + addinId + "</html>";
+                       
+                       XmlElement node = (XmlElement) addin.SelectSingleNode ("ExtensionNodeType[@id='" + nodeId + "']");
+                       if (node == null)
+                               return "<html>Extension point not found: " + nodeId + "</html>";
+                       
+                       StringBuilder sb = new StringBuilder ("<html>");
+                       sb.Append ("<h1>").Append (node.GetAttribute ("name")).Append ("</h1>");
+                       XmlElement desc = (XmlElement) node.SelectSingleNode ("Description");
+                       if (desc != null)
+                               sb.Append (desc.InnerText);
+
+                       sb.Append ("<p><b>Attributes</b>:</p>");
+                       sb.Append ("<table border=\"1\" cellpadding=\"4\" cellspacing=\"0\"><tr>");
+                       sb.Append ("<td><b>Name</b></td>");
+                       sb.Append ("<td><b>Type</b></td>");
+                       sb.Append ("<td><b>Required</b></td>");
+                       sb.Append ("<td><b>Localizable</b></td>");
+                       sb.Append ("<td><b>Description</b></td>");
+                       sb.Append ("</tr><tr>");
+                       sb.Append ("<td>id</td>");
+                       sb.Append ("<td>System.String</td>");
+                       sb.Append ("<td></td>");
+                       sb.Append ("<td></td>");
+                       sb.Append ("<td>Identifier of the node.</td>");
+                       sb.Append ("</tr>");
+                       
+                       foreach (XmlElement at in node.SelectNodes ("Attributes/Attribute")) {
+                               sb.Append ("<tr>");
+                               sb.AppendFormat ("<td>{0}</td>", at.GetAttribute ("name"));
+                               sb.AppendFormat ("<td>{0}</td>", at.GetAttribute ("type"));
+                               if (at.GetAttribute ("required") == "True")
+                                       sb.Append ("<td>Yes</td>");
+                               else
+                                       sb.Append ("<td></td>");
+                               if (at.GetAttribute ("localizable") == "True")
+                                       sb.Append ("<td>Yes</td>");
+                               else
+                                       sb.Append ("<td></td>");
+                               string sdesc = "";
+                               desc = (XmlElement) at.SelectSingleNode ("Description");
+                               if (desc != null)
+                                       sdesc = desc.InnerText;
+                               
+                               sb.AppendFormat ("<td>{0}</td>", sdesc);
+                               sb.Append ("</tr>");
+                       }
+                       sb.Append ("</table>");
+
+                       XmlNodeList children = node.SelectNodes ("ChildNodes/ExtensionNode");
+                       if (children.Count > 0) {
+                               sb.Append ("<p><b>Child Nodes</b>:</p>");
+                               sb.Append ("<table border=\"1\" cellpadding=\"4\" cellspacing=\"0\">");
+                               
+                               foreach (XmlElement en in children) {
+                                       string nid = en.GetAttribute ("id");
+                                       string nname = en.GetAttribute ("name"); 
+                                       string sdesc = "";
+                                       desc = (XmlElement) en.SelectSingleNode ("Description");
+                                       if (desc != null)
+                                               sdesc = desc.InnerText;
+                                       
+                                       sb.AppendFormat ("<tr><td><a href=\"extension-node:{0}#{1}#{2}\">{3}</a></td><td>{4}</td></tr>", fileId, addinId, nid, nname, sdesc);
+                               }
+                               sb.Append ("</table>");
+                       }
+                       
+                       sb.Append ("</html>");
+                       return sb.ToString ();
+               }
+               
+               XmlElement GetAddin (string fileId, string addinId)
+               {
+                       Stream s = GetHelpStream (fileId);
+                       using (var file = new StreamReader (s)) {
+                               XmlDocument doc = new XmlDocument ();
+                               doc.Load (file);
+                               // SelectSingleNode returns null when the add-in is not found
+                               return (XmlElement) doc.SelectSingleNode ("Addins/Addin[@fullId='" + addinId + "']");
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Monkeydoc/providers/ecma-provider.cs b/mcs/tools/monkeydoc/Monkeydoc/providers/ecma-provider.cs
new file mode 100644 (file)
index 0000000..170e10a
--- /dev/null
@@ -0,0 +1,548 @@
+//
+// The ecma provider is for ECMA-format API documentation
+//
+// Authors:
+//     John Luke (jluke@cfl.rr.com)
+//     Ben Maurer (bmaurer@users.sourceforge.net)
+//
+// Use like this:
+//   mono assembler.exe --ecma DIRECTORY --out name
+//
+
+using System;
+using System.Linq;
+using System.IO;
+using System.Text;
+using System.Xml;
+using System.Xml.Linq;
+using System.Collections.Generic;
+
+using Mono.Lucene.Net.Index;
+using Mono.Lucene.Net.Documents;
+
+using Monkeydoc.Ecma;
+using Mono.Utilities;
+
+namespace MonkeyDoc.Providers
+{
+       public enum EcmaNodeType {
+               Invalid,
+               Namespace,
+               Type,
+               Member,
+               Meta, // A node that's here to serve as a header for other nodes
+       }
+
+       public class EcmaProvider : Provider
+       {
+               string baseDir;
+
+               public EcmaProvider (string baseDir)
+               {
+                       this.baseDir = baseDir;
+               }
+
+               public override void PopulateTree (Tree tree)
+               {
+                       var root = tree.RootNode;
+                       var storage = tree.HelpSource.Storage;
+                       int resID = 0;
+
+                       foreach (var asm in Directory.EnumerateDirectories (baseDir)) {
+                               using (var reader = XmlReader.Create (File.OpenRead (Path.Combine (asm, "index.xml")))) {
+                                       reader.ReadToFollowing ("Types");
+                                       var types = XElement.Load (reader.ReadSubtree ());
+
+                                       foreach (var ns in types.Elements ("Namespace")) {
+                                               var nsNode = root.GetOrCreateNode (ns.Attribute ("Name").Value, "N:" + ns.Attribute ("Name").Value);
+
+                                               foreach (var type in ns.Elements ("Type")) {
+                                                       // Add the XML file corresponding to the type to our storage
+                                                       var id = resID++;
+                                                       using (var file = File.OpenRead (Path.Combine (asm, nsNode.Caption, type.Attribute ("Name").Value + ".xml")))
+                                                               storage.Store (id.ToString (), file);
+
+                                                       var url = "ecma:" + id + "#" + type.Attribute ("Name").Value + "/";
+                                                       var typeNode = nsNode.CreateNode ((string)(type.Attribute ("DisplayName") ?? type.Attribute ("Name")),
+                                                                                         url);
+
+                                                       // Add meta "Members" node
+                                                       typeNode.CreateNode ("Members", "*");
+                                                       var members = type.Element ("Members").Elements ("Member").ToLookup (m => m.Element ("MemberType").Value);
+                                                       foreach (var memberType in members) {
+                                                               // We pluralize the member type to get the caption and use its first letter as the URL
+                                                               var node = typeNode.CreateNode (memberType.Key + 's', memberType.Key[0].ToString ());
+                                                               int memberIndex = 0;
+                                                               // We only do minimal escaping of the member name here
+                                                               foreach (var member in memberType)
+                                                                       node.CreateNode (MakeMemberCaption (member), (memberIndex++).ToString ());
+                                                       }
+                                               }
+
+                                               nsNode.Sort ();
+                                       }
+
+                                       root.Sort ();
+                               }
+                       }
+               }
+
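+               // E.g. a member "Parse" with parameters (System.String, System.Int32)
+               // gets the caption "Parse(System.String,System.Int32)"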
+               string MakeMemberCaption (XElement member)
+               {
+                       var caption = (string)member.Attribute ("MemberName");
+                       var args = member.Element ("Parameters");
+                       if (args != null) {
+                               caption += '(';
+                               caption += args.Elements ("Parameter")
+                                              .Select (p => (string)p.Attribute ("Type"))
+                                              .Aggregate ((p1, p2) => p1 + "," + p2);
+                               caption += ')';
+                       }
+                       
+                       return caption;
+               }
+
+               public override void CloseTree (HelpSource hs, Tree tree)
+               {
+                       AddImages (hs);
+                       AddExtensionMethods (hs);
+               }
+
+               void AddEcmaXml (HelpSource hs)
+               {
+                       var xmls = Directory.EnumerateDirectories (baseDir) // Assemblies
+                               .SelectMany (Directory.EnumerateDirectories) // Namespaces
+                               .SelectMany (Directory.EnumerateFiles)
+                               .Where (f => f.EndsWith (".xml")); // Type XML files
+
+                       int resID = 0;
+                       foreach (var xml in xmls)
+                               using (var file = File.OpenRead (xml))
+                                       hs.Storage.Store ((resID++).ToString (), file);
+               }
+
+               void AddImages (HelpSource hs)
+               {
+                       var imgs = Directory.EnumerateDirectories (baseDir)
+                               .Select (d => Path.Combine (d, "_images"))
+                               .Where (Directory.Exists)
+                               .SelectMany (Directory.EnumerateFiles);
+
+                       foreach (var img in imgs)
+                               using (var file = File.OpenRead (img))
+                                       hs.Storage.Store (Path.GetFileName (img), file);
+               }
+
+               void AddExtensionMethods (HelpSource hs)
+               {
+                       var extensionMethods = Directory.EnumerateDirectories (baseDir)
+                               .Select (d => Path.Combine (d, "index.xml"))
+                               .Where (File.Exists)
+                               .Select (f => {
+                                       using (var file = File.OpenRead (f)) {
+                                               var reader = XmlReader.Create (file);
+                                               reader.ReadToFollowing ("ExtensionMethods");
+                                               return reader.ReadInnerXml ();
+                                       }
+                               });
+
+                       hs.Storage.Store ("ExtensionMethods.xml",
+                                         "<ExtensionMethods>" + extensionMethods.Aggregate (string.Concat) + "</ExtensionMethods>");
+               }
+
+               IEnumerable<string> GetEcmaXmls ()
+               {
+                       return Directory.EnumerateDirectories (baseDir) // Assemblies
+                               .SelectMany (Directory.EnumerateDirectories) // Namespaces
+                               .SelectMany (Directory.EnumerateFiles)
+                               .Where (f => f.EndsWith (".xml")); // Type XML files
+               }
+       }
+
+       public class EcmaHelpSource : HelpSource
+       {
+               const string EcmaPrefix = "ecma:";
+               EcmaUrlParser parser = new EcmaUrlParser ();
+               LRUCache<string, Node> cache = new LRUCache<string, Node> (4);
+
+               public EcmaHelpSource (string base_file, bool create) : base (base_file, create)
+               {
+               }
+
+               protected override string UriPrefix {
+                       get {
+                               return EcmaPrefix;
+                       }
+               }
+
+               public override bool CanHandleUrl (string url)
+               {
+                       if (url.Length > 2 && url[1] == ':') {
+                               switch (url[0]) {
+                               case 'T':
+                               case 'M':
+                               case 'C':
+                               case 'P':
+                               case 'E':
+                               case 'F':
+                               case 'N':
+                               case 'O':
+                                       return MatchNode (url) != null;
+                               }
+                       }
+                       return base.CanHandleUrl (url);
+               }
+
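+               // E.g. an id of "foo?a=1&b=2" (illustrative) yields extraParams
+               // { "a" => "1", "b" => "2" }; everything after the last '?' is parsed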
+               public override DocumentType GetDocumentTypeForId (string id, out Dictionary<string, string> extraParams)
+               {
+                       extraParams = null;
+                       int interMark = id.LastIndexOf ('?');
+                       if (interMark != -1)
+                               extraParams = id.Substring (interMark + 1)
+                                       .Split ('&')
+                                       .Select (nvp => {
+                                               var eqIdx = nvp.IndexOf ('=');
+                                               return new { Key = nvp.Substring (0, eqIdx < 0 ? nvp.Length : eqIdx), Value = nvp.Substring (eqIdx + 1) };
+                                       })
+                                       .ToDictionary (kvp => kvp.Key, kvp => kvp.Value );
+
+                       return DocumentType.EcmaXml;
+               }
+
+               public override string GetPublicUrl (Node node)
+               {
+                       string url = string.Empty;
+                       var type = GetNodeType (node);
+                       //Console.WriteLine ("GetPublicUrl {0} : {1} [{2}]", node.Element, node.Caption, type.ToString ());
+                       switch (type) {
+                       case EcmaNodeType.Namespace:
+                               return node.Element; // A namespace node already has a well-formatted internal URL
+                       case EcmaNodeType.Type:
+                               return MakeTypeNodeUrl (node);
+                       case EcmaNodeType.Meta:
+                               return MakeTypeNodeUrl (GetNodeTypeParent (node)) + GenerateMetaSuffix (node);
+                       case EcmaNodeType.Member:
+                               var typeChar = GetNodeMemberTypeChar (node);
+                               var parentNode = GetNodeTypeParent (node);
+                               var typeNode = MakeTypeNodeUrl (parentNode).Substring (2);
+                               return typeChar + ":" + typeNode + MakeMemberNodeUrl (typeChar, node);
+                       default:
+                               return null;
+                       }
+               }
+
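+               // E.g. a node with Element "ecma:42#Dictionary`2/" under the namespace
+               // node "System.Collections.Generic" (illustrative) maps to
+               // "T:System.Collections.Generic.Dictionary`2"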
+               string MakeTypeNodeUrl (Node node)
+               {
+                       // A Type node has an Element property of the form: 'ecma:{number}#{typename}/'
+                       var hashIndex = node.Element.IndexOf ('#');
+                       var typeName = node.Element.Substring (hashIndex + 1, node.Element.Length - hashIndex - 2);
+                       return "T:" + node.Parent.Caption + '.' + typeName.Replace ('.', '+');
+               }
+
+               string MakeMemberNodeUrl (char typeChar, Node node)
+               {
+                       // We clean the inner-type ctor name, which may contain the outer type name
+                       var caption = node.Caption;
+
+                       // Sanitize constructor caption of inner types
+                       if (typeChar == 'C') {
+                               int lastDot = -1;
+                               for (int i = 0; i < caption.Length && caption[i] != '('; i++)
+                                       lastDot = caption[i] == '.' ? i : lastDot;
+                               return lastDot == -1 ? '.' + caption : caption.Substring (lastDot);
+                       }
+
+                       /* We handle type conversion operators by checking if the name contains " to "
+                        * (as in 'foo to bar') and generating a corresponding conversion signature
+                        */
+                       if (typeChar == 'O' && caption.IndexOf (" to ") != -1) {
+                               var parts = caption.Split (' ');
+                               return "." + node.Parent.Caption + "(" + parts[0] + ", " + parts[2] + ")";
+                       }
+
+                       /* The goal here is to handle methods that are explicit interface
+                        * implementations, such as 'void IDisposable.Dispose ()', for which the
+                        * caption is a dotted expression that would collide with the ecma parser.
+                        * If the first non-alphanumeric character in the caption is a dot then we
+                        * have an explicit member implementation (we assume the interface name is
+                        * namespace-qualified)
+                        */
+                       var firstNonAlpha = caption.FirstOrDefault (c => !char.IsLetterOrDigit (c));
+                       if (firstNonAlpha == '.')
+                               return "$" + caption;
+
+                       return "." + caption;
+               }
+
+               EcmaNodeType GetNodeType (Node node)
+               {
+                       // We infer the node type from the depth at which it sits in the tree
+                       int level = GetNodeLevel (node);
+                       switch (level) {
+                       case 0:
+                               return EcmaNodeType.Namespace;
+                       case 1:
+                               return EcmaNodeType.Type;
+                       case 2:
+                               return EcmaNodeType.Meta;
+                       case 3: // Here it's either a member or, in case of overload, a meta
+                               return node.IsLeaf ? EcmaNodeType.Member : EcmaNodeType.Meta;
+                       case 4: // At this level, everything is necessarily a member
+                               return EcmaNodeType.Member;
+                       default:
+                               return EcmaNodeType.Invalid;
+                       }
+               }
+
+               int GetNodeLevel (Node node)
+               {
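+                       // Walk up the parent chain until we reach the root node; subtract one so namespace nodes sit at level 0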
+                       int i = 0;
+                       for (; !node.Element.StartsWith ("root:/"); i++) {
+                               //Console.WriteLine ("\tLevel {0} : {1} {2}", i, node.Element, node.Caption);
+                               node = node.Parent;
+                       }
+                       return i - 1;
+               }
+
+               char GetNodeMemberTypeChar (Node node)
+               {
+                       int level = GetNodeLevel (node);
+                       // Only methods/operators can be under a meta node, so when the member level is
+                       // deeper than normal (which indicates an overload meta), fetch the type char
+                       // from the grandparent instead
+                       return level == 3 ? node.Parent.Element[0] : node.Parent.Parent.Element[0];
+               }
+
+               Node GetNodeTypeParent (Node node)
+               {
+                       // Type nodes always sit two levels below the root so we just need to climb up to there
+                       while (node != null && node.Parent != null && !node.Parent.Parent.Element.StartsWith ("root:/"))
+                               node = node.Parent;
+                       return node;
+               }
+
+               string GenerateMetaSuffix (Node node)
+               {
+                       string suffix = string.Empty;
+                       // A meta node always has a type node above it, so climb until we reach it
+                       while (GetNodeType (node) != EcmaNodeType.Type) {
+                               suffix = '/' + node.Element + suffix;
+                               node = node.Parent;
+                       }
+                       return suffix;
+               }
+
+               public override string GetInternalIdForUrl (string url, out Node node)
+               {
+                       var id = string.Empty;
+                       node = null;
+
+                       if (!url.StartsWith (EcmaPrefix)) {
+                               node = MatchNode (url);
+                               if (node == null)
+                                       return null;
+                               id = node.GetInternalUrl ();
+                       }
+
+                       if (id.StartsWith (UriPrefix))
+                               id = id.Substring (UriPrefix.Length);
+                       else if (id.StartsWith ("N:"))
+                               id = "xml.summary." + id.Substring ("N:".Length);
+
+                       var hashIndex = id.IndexOf ('#');
+                       var hash = string.Empty;
+                       if (hashIndex != -1) {
+                               hash = id.Substring (hashIndex + 1);
+                               id = id.Substring (0, hashIndex);
+                       }
+
+                       return id + GetArgs (hash, node);
+               }
+
+               public override Node MatchNode (string url)
+               {
+                       Node node = null;
+                       if ((node = cache.Get (url)) == null) {
+                               node = InternalMatchNode (url);
+                               if (node != null)
+                                       cache.Put (url, node);
+                       }
+                       return node;
+               }
+
+               public Node InternalMatchNode (string url)
+               {
+                       Node result = null;
+                       //Console.WriteLine ("Ecma-hs MatchNode with {0}", url);
+                       EcmaDesc desc;
+                       if (!parser.TryParse (url, out desc))
+                               return null;
+
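+                       // Resolution proceeds in stages: namespace, then type, then member kind
+                       // (e.g. "Methods"), then the member itself, then a possible overload.
+                       // Each stage returns early when the parsed url doesn't go deeper.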
+                       //Console.WriteLine ("EcmaDesc: {0}", desc.ToString ());
+                       // Namespace search
+                       Node currentNode = Tree.RootNode;
+                       Node searchNode = new Node () { Caption = desc.Namespace };
+                       int index = currentNode.Nodes.BinarySearch (searchNode, EcmaGenericNodeComparer.Instance);
+                       if (index >= 0)
+                               result = currentNode.Nodes[index];
+                       if (desc.DescKind == EcmaDesc.Kind.Namespace || index < 0)
+                               return result;
+
+                       //Console.WriteLine ("Post NS");
+
+                       // Type search
+                       currentNode = result;
+                       result = null;
+                       searchNode.Caption = desc.ToCompleteTypeName ();
+                       //Console.WriteLine ("Type search: {0}", searchNode.Caption);
+                       index = currentNode.Nodes.BinarySearch (searchNode, EcmaTypeNodeComparer.Instance);
+                       if (index >= 0)
+                               result = currentNode.Nodes[index];
+                       if ((desc.DescKind == EcmaDesc.Kind.Type && !desc.IsEtc) || index < 0)
+                               return result;
+
+                       //Console.WriteLine ("Post Type");
+
+                       // Member selection
+                       currentNode = result;
+                       result = null;
+                       var caption = desc.IsEtc ? EtcKindToCaption (desc.Etc) : MemberKindToCaption (desc.DescKind);
+                       currentNode = FindNodeForCaption (currentNode.Nodes, caption);
+                       if (currentNode == null 
+                           || (desc.IsEtc && desc.DescKind == EcmaDesc.Kind.Type && string.IsNullOrEmpty (desc.EtcFilter)))
+                               return currentNode;
+
+                       //Console.WriteLine ("Post caption");
+
+                       // Member search
+                       result = null;
+                       var format = desc.DescKind == EcmaDesc.Kind.Constructor ? EcmaDesc.Format.WithArgs : EcmaDesc.Format.WithoutArgs;
+                       searchNode.Caption = desc.ToCompleteMemberName (format);
+                       //Console.WriteLine ("Member caption {0}", searchNode.Caption);
+                       index = currentNode.Nodes.BinarySearch (searchNode, EcmaGenericNodeComparer.Instance);
+                       if (index < 0) {
+                               //foreach (var n in currentNode.Nodes)
+                               //      Console.WriteLine (n.Caption);
+                               return null;
+                       }
+                       result = currentNode.Nodes[index];
+                       //Console.WriteLine ("Member result: {0} {1} {2}", result.Caption, result.Nodes.Count, desc.IsEtc);
+                       if (result.Nodes.Count == 0 || desc.IsEtc)
+                               return result;
+
+                       //Console.WriteLine ("Post member");
+
+                       // Overloads search
+                       currentNode = result;
+                       searchNode.Caption = desc.ToCompleteMemberName (EcmaDesc.Format.WithArgs);
+                       //Console.WriteLine ("Overload caption: {0}", searchNode.Caption);
+                       //Console.WriteLine ("Candidates: {0}", string.Join (", ", currentNode.Nodes.Select (n => n.Caption)));
+                       index = currentNode.Nodes.BinarySearch (searchNode, EcmaGenericNodeComparer.Instance);
+                       if (index < 0)
+                               return result;
+                       result = result.Nodes[index];
+                       
+                       return result;
+               }
+
+               // This comparer returns the answer straight from caption comparison
+               class EcmaGenericNodeComparer : IComparer<Node>
+               {
+                       public static readonly EcmaGenericNodeComparer Instance = new EcmaGenericNodeComparer ();
+
+                       public int Compare (Node n1, Node n2)
+                       {
+                               /*var result = string.Compare (n1.Caption, n2.Caption, StringComparison.OrdinalIgnoreCase);
+                               Console.WriteLine ("{0} {2} {1}", n1.Caption, n2.Caption, result == 0 ? '=' : result < 0 ? '<' : '>');
+                               return result;*/
+                               return string.Compare (n1.Caption, n2.Caption, StringComparison.OrdinalIgnoreCase);
+                       }
+               }
+
+               // This comparer takes the space in type captions into account by comparing only the part before the last space
+               class EcmaTypeNodeComparer : IComparer<Node>
+               {
+                       public static readonly EcmaTypeNodeComparer Instance = new EcmaTypeNodeComparer ();
+
+                       public int Compare (Node n1, Node n2)
+                       {
+                               return string.Compare (Clear (n1.Caption), Clear (n2.Caption), StringComparison.OrdinalIgnoreCase);
+                       }
+
+                       string Clear (string caption)
+                       {
+                               int lastSpace = caption.LastIndexOf (' ');
+                               return lastSpace == -1 ? caption : caption.Substring (0, lastSpace);
+                       }
+               }
+
+               string EtcKindToCaption (char etc)
+               {
+                       switch (etc) {
+                       case 'M':
+                               return "Methods";
+                       case 'P':
+                               return "Properties";
+                       case 'C':
+                               return "Constructors";
+                       case 'F':
+                               return "Fields";
+                       case 'E':
+                               return "Events";
+                       case 'O':
+                               return "Operators";
+                       case '*':
+                               return "Members";
+                       default:
+                               return null;
+                       }
+               }
+
+               string MemberKindToCaption (EcmaDesc.Kind kind)
+               {
+                       switch (kind) {
+                       case EcmaDesc.Kind.Method:
+                               return "Methods";
+                       case EcmaDesc.Kind.Property:
+                               return "Properties";
+                       case EcmaDesc.Kind.Constructor:
+                               return "Constructors";
+                       case EcmaDesc.Kind.Field:
+                               return "Fields";
+                       case EcmaDesc.Kind.Event:
+                               return "Events";
+                       case EcmaDesc.Kind.Operator:
+                               return "Operators";
+                       default:
+                               return null;
+                       }
+               }
+
+               Node FindNodeForCaption (List<Node> nodes, string caption)
+               {
+                       foreach (var node in nodes)
+                               if (node.Caption.Equals (caption, StringComparison.Ordinal))
+                                       return node;
+                       return null;
+               }
+
+               string GetArgs (string hash, Node node)
+               {
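+                       // Builds the query string appended to the internal id,
+                       // e.g. '?source-id=3&show=namespace&namespace=System' (values are illustrative)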
+                       var args = new Dictionary<string, string> ();
+                       
+                       args["source-id"] = SourceID.ToString ();
+                       
+                       if (node != null) {
+                               switch (GetNodeType (node)) {
+                               case EcmaNodeType.Namespace:
+                                       args["show"] = "namespace";
+                                       args["namespace"] =  node.Element.Substring ("N:".Length);
+                                       break;
+                               }
+                       }
+
+                       if (!string.IsNullOrEmpty (hash))
+                               args["hash"] = hash;
+
+                       return "?" + string.Join ("&", args.Select (kvp => kvp.Key == kvp.Value ? kvp.Key : kvp.Key + '=' + kvp.Value));
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Monkeydoc/providers/ecmaspec-provider.cs b/mcs/tools/monkeydoc/Monkeydoc/providers/ecmaspec-provider.cs
new file mode 100644 (file)
index 0000000..5e28eb8
--- /dev/null
@@ -0,0 +1,194 @@
+//
+// The ecmaspec provider is for ECMA specifications
+//
+// Authors:
+//     John Luke (jluke@cfl.rr.com)
+//     Ben Maurer (bmaurer@users.sourceforge.net)
+//
+// Use like this:
+//   mono assembler.exe --ecmaspec DIRECTORY --out name
+//
+
+using System;
+using System.Diagnostics;
+using System.IO;
+using System.Text;
+using System.Xml.XPath;
+using System.Xml.Xsl;
+using System.Xml;
+using System.Collections.Generic;
+using Mono.Lucene.Net.Index;
+using Mono.Lucene.Net.Documents;
+
+namespace MonkeyDoc.Providers
+{
+       public class EcmaSpecProvider : Provider
+       {
+               string basedir;
+       
+               public EcmaSpecProvider (string base_directory)
+               {
+                       basedir = base_directory;
+                       if (!Directory.Exists (basedir))
+                               throw new DirectoryNotFoundException (String.Format ("The directory `{0}' does not exist", basedir));
+               }
+       
+               public override void PopulateTree (Tree tree)
+               {
+                       XPathNavigator n = new XPathDocument (Path.Combine (basedir, "toc.xml")).CreateNavigator ();
+                       n.MoveToRoot ();
+                       n.MoveToFirstChild ();
+                       PopulateNode (n.SelectChildren ("node", ""), tree.RootNode);
+               }
+       
+               void PopulateNode (XPathNodeIterator nodes, Node treeNode)
+               {
+                       foreach (XPathNavigator n in nodes) {
+                               string secNumber = n.GetAttribute ("number", "");
+                               string secName = n.GetAttribute ("name", "");
+
+                               var storage = treeNode.Tree.HelpSource.Storage;
+                               using (var file = File.OpenRead (Path.Combine (basedir, secNumber + ".xml")))
+                                       storage.Store (secNumber, file);
+
+                               Node thisNode = treeNode.GetOrCreateNode (secNumber + ": " + secName, "ecmaspec:" + secNumber);
+                       
+                               if (n.HasChildren)
+                                       PopulateNode (n.SelectChildren ("node", ""), thisNode);
+                       }
+               }
+
+               public override void CloseTree (HelpSource hs, Tree tree)
+               {
+               }
+       }
+
+       public class EcmaSpecHelpSource : HelpSource
+       {
+               const string EcmaspecPrefix = "ecmaspec:";
+               const string TocPart = "%toc"; // What is returned as TocXml
+               const string SpecPart = "%spec"; // What is returned as Ecmaspec
+
+               public EcmaSpecHelpSource (string base_file, bool create) : base (base_file, create)
+               {
+               }
+
+               public override DocumentType GetDocumentTypeForId (string id, out Dictionary<string, string> extraParams)
+               {
+                       extraParams = null;
+                       return id.EndsWith (TocPart) ? DocumentType.TocXml : DocumentType.EcmaSpecXml;
+               }
+
+               public override bool IsGeneratedContent (string id)
+               {
+                       return id == "root:" || id.EndsWith (TocPart);
+               }
+
+               public override bool IsMultiPart (string id, out IEnumerable<string> parts)
+               {
+                       if (id == "root:" || id.EndsWith (TocPart) || id.EndsWith (SpecPart)) {
+                               parts = null;
+                               return false;
+                       }
+                       parts = MakeMultiPart (id);
+                       return true;
+               }
+
+               IEnumerable<string> MakeMultiPart (string baseId)
+               {
+                       yield return baseId + SpecPart;
+                       yield return baseId + TocPart;
+               }
+
+               public override string GetText (string id)
+               {
+                       Node n = id == "root:" ? Tree.RootNode : MatchNode (EcmaspecPrefix + id.Substring (0, id.Length - TocPart.Length));
+                       if (n == null)
+                               throw new ArgumentException (string.Format ("{0} -> {1}", id, EcmaspecPrefix + id.Substring (0, id.Length - TocPart.Length)), "id");
+                       return TreeDumper.ExportToTocXml (n, "C# Language Specification", "In this section:");
+               }
+
+               public override Stream GetHelpStream (string id)
+               {
+                       return id.EndsWith (SpecPart) ? base.GetHelpStream (id.Substring (0, id.IndexOf (SpecPart))) : base.GetHelpStream (id);
+               }
+       
+               public override void PopulateSearchableIndex (IndexWriter writer) 
+               {
+                       foreach (Node n in Tree.RootNode.Nodes)
+                               AddDocuments (writer, n);
+               }
+
+               protected override string UriPrefix {
+                       get {
+                               return EcmaspecPrefix;
+                       }
+               }
+
+               void AddDocuments (IndexWriter writer, Node node) 
+               {
+                       string url = node.PublicUrl;
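+                       // PublicUrl starts with the 'ecmaspec:' prefix (9 characters); strip it to get the storage id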
+                       Stream file_stream = GetHelpStream (url.Substring (9));
+                       if (file_stream == null) //Error
+                               return;
+                       XmlDocument xdoc = new XmlDocument ();
+                       xdoc.Load (new XmlTextReader (file_stream));
+
+                       //Obtain the title
+                       XmlNode nelem = xdoc.DocumentElement;
+                       string title = nelem.Attributes["number"].Value + ": " + nelem.Attributes["title"].Value;
+
+                       //Obtain the text
+                       StringBuilder s = new StringBuilder ();
+                       GetTextNode (nelem, s);
+                       string text = s.ToString ();
+
+                       //Obtain the examples
+                       StringBuilder s2 = new StringBuilder ();
+                       GetExamples (nelem, s2);
+                       string examples = s2.ToString ();
+
+                       //Write to the Lucene Index all the parts
+                       SearchableDocument doc = new SearchableDocument ();
+                       doc.title = title;
+                       doc.hottext = title.Substring (title.IndexOf (':')); 
+                       doc.url = url;
+                       doc.text = text;
+                       doc.examples = examples;
+                       writer.AddDocument (doc.LuceneDoc);
+               
+                       if (node.IsLeaf)
+                               return;
+
+                       foreach (Node n in node.Nodes)
+                               AddDocuments (writer, n);
+               }
+
+               void GetTextNode (XmlNode n, StringBuilder s) 
+               {
+                       // don't include C# code examples
+                       if (n.Name == "code_example")
+                               return;
+                       // include the content of text nodes
+                       if (n.NodeType == XmlNodeType.Text)
+                               s.Append (n.Value);
+               
+                       // recursively explore all child nodes
+                       if (n.HasChildNodes)
+                               foreach (XmlNode n_child in n.ChildNodes)
+                                       GetTextNode (n_child, s);
+               }
+
+               void GetExamples (XmlNode n, StringBuilder s)
+               {
+                       if (n.Name == "code_example") {
+                               if (n.FirstChild.Name == "#cdata-section")
+                                       s.Append (n.FirstChild.Value);
+                       } else {
+                               if (n.HasChildNodes)
+                                       foreach (XmlNode n_child in n.ChildNodes)
+                                               GetExamples (n_child, s);
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Monkeydoc/providers/error-provider.cs b/mcs/tools/monkeydoc/Monkeydoc/providers/error-provider.cs
new file mode 100644 (file)
index 0000000..8151522
--- /dev/null
@@ -0,0 +1,201 @@
+//
+// error-provider.cs
+//
+// Author:
+//   Ben Maurer (bmaurer@users.sourceforge.net)
+//
+// (C) 2003 Ben Maurer
+// Copyright 2003-2011 Novell
+// Copyright 2011 Xamarin Inc
+//
+
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Xml;
+using System.Xml.Serialization;
+using System.Linq;
+using Mono.Lucene.Net.Index;
+using Mono.Lucene.Net.Documents;
+
+namespace MonkeyDoc.Providers
+{
+       public class ErrorProviderConfig
+       {
+               public string FilesPath;
+               public string Match;
+               public int ErrorNumSubstringStart;
+               public int ErrorNumSubstringLength;
+               public string FriendlyFormatString;
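+
+               // Example configuration (hypothetical values):
+               //   FilesPath = "errors", Match = "cs*.cs",
+               //   ErrorNumSubstringStart = 2, ErrorNumSubstringLength = 4,
+               //   FriendlyFormatString = "CS{0:0###}"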
+
+               public override string ToString ()
+               {
+                       var sb = new StringBuilder ();
+                       var w = new StringWriter (sb);
+                       
+                       w.WriteLine ("FilesPath: {0}", FilesPath);
+                       w.WriteLine ("Match: {0}", Match);
+                       w.WriteLine ("Error Number Substring: {0} Length:{1}", ErrorNumSubstringStart, ErrorNumSubstringLength);
+                       w.WriteLine ("FriendlyFormatString: {0}", FriendlyFormatString);
+                       
+                       return w.ToString ();
+               }
+               
+               public Dictionary<string, ErrorDocumentation> Compile (HelpSource hs)
+               {
+                       string[] files = Directory.GetFiles (FilesPath, Match);
+                       var ret = new Dictionary<string, ErrorDocumentation> ();
+                       
+                       foreach (string s in files) {
+                               ErrorDocumentation d;
+                               int errorNum = 0;
+
+                               try {
+                                       errorNum = int.Parse (Path.GetFileName (s).Substring (ErrorNumSubstringStart, ErrorNumSubstringLength));
+                               } catch {
+                                       Console.WriteLine ("Ignoring file {0}", s);
+                                       continue;
+                               }
+                               
+                               string errorName = String.Format (FriendlyFormatString, errorNum);
+                               
+                               if (!ret.TryGetValue (errorName, out d))
+                                       ret[errorName] = d = new ErrorDocumentation (errorName);
+
+                               if (d.Details == null) {
+                                       string xmlFile = Path.ChangeExtension (s, "xml");
+                                       if (File.Exists (xmlFile)) {
+                                               XmlSerializer cfgRdr = new XmlSerializer (typeof (ErrorDetails));
+                                               d.Details = (ErrorDetails)cfgRdr.Deserialize (new XmlTextReader (xmlFile));
+                                       }
+                               }
+                               // The encoding (ISO 8859-1, codepage 28591) is the same one MCS uses, so we can read all of those files
+                               using (StreamReader reader = new StreamReader (s, Encoding.GetEncoding (28591))) {
+                                       d.Examples.Add (reader.ReadToEnd ());
+                               }
+                       }
+                       
+                       return ret;
+               }
+       }
+
+       public class ErrorDocumentation
+       {
+               public string ErrorName;
+               public ErrorDetails Details;
+               public List<string> Examples = new List<string> ();
+               
+               public ErrorDocumentation () {}
+               public ErrorDocumentation (string ErrorName)
+               {
+                       this.ErrorName = ErrorName;
+               }
+       }
+       
+       public class ErrorDetails
+       {
+               public XmlNode Summary;
+               public XmlNode Details;
+       }
+
+       public class ErrorProvider : Provider
+       {
+               ErrorProviderConfig config;
+               
+               public ErrorProvider (string configFile)
+               {
+                       config = ReadConfig (configFile);
+               }
+               
+               public static ErrorProviderConfig ReadConfig (string file)
+               {
+                       XmlSerializer cfgRdr = new XmlSerializer (typeof (ErrorProviderConfig));
+                       ErrorProviderConfig ret = (ErrorProviderConfig)cfgRdr.Deserialize (new XmlTextReader (file));
+                       // handle path rel to the config file
+                       ret.FilesPath = Path.Combine (Path.GetDirectoryName (file), ret.FilesPath);
+                       return ret;
+               }
+       
+               public override void PopulateTree (Tree tree)
+               {
+                       // Everything is done in CloseTree so that all the content can be packed at once
+               }
+       
+               public override void CloseTree (HelpSource hs, Tree tree)
+               {
+                       var entries = config.Compile (hs);
+                       MemoryStream ms = new MemoryStream ();
+                       XmlSerializer writer = new XmlSerializer (typeof (ErrorDocumentation));
+                       
+                       foreach (var de in entries) {
+                               ErrorDocumentation d = de.Value;
+                               string s = de.Key;
+
+                               tree.RootNode.GetOrCreateNode (s, "error:" + s);
+                               
+                               writer.Serialize (ms, d);
+                               ms.Position = 0;
+                               hs.Storage.Store (s, ms);
+                               ms.SetLength (0);
+                       }
+                       
+                       tree.RootNode.Sort ();
+               }
+       }
+       
+       public class ErrorHelpSource : HelpSource
+       {               
+               public ErrorHelpSource (string base_file, bool create) : base (base_file, create)
+               {
+               }
+
+               public override string GetText (string id)
+               {
+                       return TreeDumper.ExportToTocXml (Tree.RootNode, "Compiler Error Reference", "In this section:");
+               }
+               
+               protected override string UriPrefix {
+                       get {
+                               return "error:";
+                       }
+               }
+
+               public override bool IsGeneratedContent (string id)
+               {
+                       return id == "root:";
+               }
+
+               public override DocumentType GetDocumentTypeForId (string id, out Dictionary<string, string> extraParams)
+               {
+                       extraParams = null;
+                       return id == "root:" ? DocumentType.TocXml : DocumentType.ErrorXml;
+               }
+
+               public override string GetInternalIdForUrl (string url, out Node node)
+               {
+                       var result = base.GetInternalIdForUrl (url, out node);
+                       // base can return null when the url doesn't resolve to a node
+                       return result == null ? null : result.ToLower ();
+               }
+               
+               public override void PopulateIndex (IndexMaker index_maker)
+               {
+                       foreach (Node n in Tree.RootNode.Nodes)
+                               index_maker.Add (n.Caption, n.Caption, n.Element);
+               }
+
+               public override void PopulateSearchableIndex (IndexWriter writer) 
+               {
+                       foreach (Node n in Tree.RootNode.Nodes) {
+                               XmlSerializer reader = new XmlSerializer (typeof (ErrorDocumentation));
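+                               // Node elements look like 'error:CS0001'; strip the 6-char 'error:' prefix to get the storage id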
+                               ErrorDocumentation d = (ErrorDocumentation)reader.Deserialize (GetHelpStream (n.Element.Substring (6)));
+                               SearchableDocument doc = new SearchableDocument ();
+                               doc.title = d.ErrorName;
+                               doc.url = n.Element;
+                               doc.text = d.Details != null ? d.Details.ToString () : string.Empty;
+                               // string.Join handles the empty list case that Aggregate would throw on
+                               doc.examples = string.Join (Environment.NewLine, d.Examples);
+                               doc.hottext = d.ErrorName;
+                               writer.AddDocument (doc.LuceneDoc);
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Monkeydoc/providers/man-provider.cs b/mcs/tools/monkeydoc/Monkeydoc/providers/man-provider.cs
new file mode 100755 (executable)
index 0000000..32b9c93
--- /dev/null
@@ -0,0 +1,106 @@
+//
+// A provider to display man pages
+//
+// Authors:
+//   Johannes Roith <johannes@roith.de>
+//   Jonathan Pryor <jpryor@novell.com>
+//
+// (C) 2008 Novell, Inc.
+
+using System;
+using System.IO;
+using System.Text;
+using System.Xml;
+using System.Linq;
+using System.Collections.Generic;
+
+namespace MonkeyDoc.Providers
+{
+       public class ManProvider : Provider
+       {
+               string[] tocFiles;
+       
+               public ManProvider (string[] handbookTocFiles)
+               {
+                       tocFiles = handbookTocFiles;
+
+                       // Fail early if even the first TOC file is missing
+                       if (!File.Exists (tocFiles[0]))
+                               throw new FileNotFoundException (String.Format ("The table of contents, `{0}' does not exist", tocFiles[0]));
+               }
+
+               public override void PopulateTree (Tree tree)
+               {
+                       foreach (string tocFile in tocFiles) {
+                               XmlDocument doc = new XmlDocument ();
+                               doc.Load (tocFile);
+
+                               XmlNodeList nodeList = doc.GetElementsByTagName("manpage");
+                               Node nodeToAddChildrenTo = tree.RootNode;
+                               var storage = nodeToAddChildrenTo.Tree.HelpSource.Storage;
+
+                               foreach (XmlNode node in nodeList) {
+
+                                       XmlAttribute name = node.Attributes["name"];
+                                       XmlAttribute page = node.Attributes["page"];
+
+                                       if (name == null || page == null) continue;
+
+                                       if (!File.Exists (page.Value))
+                                               continue;
+
+                                       string target = "man:" + name.Value;
+                                       nodeToAddChildrenTo.CreateNode (name.Value, target);
+
+                                       // The existence check above already passed; store the page
+                                       // and dispose the file stream once it has been copied
+                                       using (var pageStream = File.OpenRead (page.Value))
+                                               storage.Store (name.Value, pageStream);
+                               }
+                       }
+               }
+
+               public override void CloseTree (HelpSource hs, Tree tree)
+               {
+               }
+       }
+
+       public class ManHelpSource : HelpSource
+       {
+               const string ManPrefix = "man:";
+               Dictionary<string, Node> nodesMap;
+
+               public ManHelpSource (string base_file, bool create) : base (base_file, create)
+               {
+                       nodesMap = Tree.RootNode.Nodes.ToDictionary (n => n.Element);
+               }
+
+               // Since the man tree is always flat and has a rather small number of items,
+               // we store them in a dictionary
+               public override Node MatchNode (string url)
+               {
+                       Node result;
+                       return nodesMap.TryGetValue (url, out result) ? result : null;
+               }
+
+               public override DocumentType GetDocumentTypeForId (string id, out Dictionary<string, string> extraParams)
+               {
+                       extraParams = null;
+                       return id == "root:" ? DocumentType.TocXml : DocumentType.Man;
+               }
+
+               public override bool IsGeneratedContent (string id)
+               {
+                       return id == "root:";
+               }
+       
+               public override string GetText (string url)
+               {
+                       return TreeDumper.ExportToTocXml (Tree.RootNode, "Mono Documentation Library", "Available man pages:");
+               }
+
+               protected override string UriPrefix {
+                       get {
+                               return ManPrefix;
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Monkeydoc/providers/simple-provider.cs b/mcs/tools/monkeydoc/Monkeydoc/providers/simple-provider.cs
new file mode 100644 (file)
index 0000000..e567dc1
--- /dev/null
@@ -0,0 +1,153 @@
+//
+// The simple provider is an example provider
+//
+// Author:
+//   Miguel de Icaza (miguel@ximian.com)
+//
+// Use like this:
+//   mono assembler.exe --simple DIRECTORY --out name
+//
+// Then create a .source file in your sources directory, and copy
+// name.tree and name.zip to the sources directory.
+//
+// To view the tree generated, use:
+//   mono dump.exe name.tree
+//
+namespace Monodoc {
+using System;
+using System.IO;
+using System.Text;
+
+//
+// The simple provider generates the information source
+//
+public class SimpleProvider : Provider {
+       string basedir;
+       
+       public SimpleProvider (string base_directory)
+       {
+               basedir = base_directory;
+               if (!Directory.Exists (basedir))
+                       throw new DirectoryNotFoundException (String.Format ("The directory `{0}' does not exist", basedir));
+       }
+
+       public override void PopulateTree (Tree tree)
+       {
+               Node top = tree.LookupNode ("Directory at: " + basedir, "simple:");
+               
+               foreach (string dir in Directory.GetDirectories (basedir)){
+                       string url = Path.GetFileName (dir);
+                       Node n = top.LookupNode ("Dir: " + url, url);
+                       PopulateDir (n, dir);
+               }
+       }
+
+#pragma warning disable 219
+       void PopulateDir (Node me, string dir)
+       {
+               Console.WriteLine ("Adding: " + dir);
+               foreach (string child_dir in Directory.GetDirectories (dir)){
+                       string url = Path.GetFileName (child_dir);
+                       Node n = me.LookupNode ("Dir: " + url, "simple-directory:" + url);
+                       PopulateDir (n, child_dir); // recurse into the child directory's own node
+               }
+
+               foreach (string file in Directory.GetFiles (dir)){
+                       Console.WriteLine ("   File: " + file);
+                       string file_code = me.tree.HelpSource.PackFile (file);
+
+                       //
+                       // The url element encoded for the file is:
+                       //  originalfilename#CODE
+                       //
+                       // The code is assigned to us after the file has been packaged
+                       // We use the original-filename later to render html or text files
+                       //
+                       Node n = me.LookupNode (Path.GetFileName (file), file + "#" + file_code);
+                       
+               }
+       }
+
+       public override void CloseTree (HelpSource hs, Tree tree)
+       {
+       }
+}
+
+//
+// The HelpSource is used during the rendering phase.
+//
+
+public class SimpleHelpSource : HelpSource {
+       Encoding enc;
+       
+       public SimpleHelpSource (string base_file, bool create) : base (base_file, create)
+       {
+               enc = new UTF8Encoding (false, false);
+       }
+
+       public override string GetText (string url, out Node match_node)
+       {
+               match_node = null;
+
+               string c = GetCachedText (url);
+               if (c != null)
+                       return c;
+
+               if (url.StartsWith ("simple:") || url.StartsWith ("simple-directory:"))
+                       return GetTextFromUrl (url);
+
+               return null;
+       }
+
+       string GetTextFromUrl (string url)
+       {
+               // Check the directory prefix before stripping "simple:",
+               // otherwise it can never match
+               if (url.StartsWith ("simple-directory:"))
+                       return String.Format ("<html>This is a directory entry point: {0} </html>",
+                                             url.Substring (17));
+
+               // Remove "simple:" prefix
+               url = url.Substring (7);
+
+               // Otherwise the last element of the url is the file code we got.
+               int pound = url.LastIndexOf ("#");
+               string code;
+               if (pound == -1)
+                       code = url;
+               else
+                       code = url.Substring (pound+1);
+
+
+               Stream s = GetHelpStream (code);
+               if (s == null)
+                       return String.Format ("<html>No stream for this node: {0} </html>", url);
+
+               //
+               // Now, get the file type
+               //
+               int slash = url.LastIndexOf ("/");
+               int nameEnd = pound == -1 ? url.Length : pound;
+               string fname = url.Substring (slash + 1, nameEnd - slash - 1).ToLower ();
+
+               if (fname.EndsWith (".html") || fname.EndsWith (".htm")){
+                       TextReader r = new StreamReader (s, enc);
+                       return r.ReadToEnd ();
+               }
+
+               if (fname.EndsWith (".png") || fname.EndsWith (".jpg") ||
+                   fname.EndsWith (".jpeg") || fname.EndsWith (".gif")){
+                       return "<html>Image file, have not implemented rendering this yet</html>";
+               }
+
+               // Convert text to HTML
+               StringBuilder result = new StringBuilder ("<html>");
+               TextReader reader = new StreamReader (s, enc);
+               string line;
+               
+               while ((line = reader.ReadLine ()) != null){
+                       result.Append (line);
+                       result.Append ("<br>");
+               }
+               result.Append ("<html>");
+               return result.ToString ();
+       }
+}
+}
diff --git a/mcs/tools/monkeydoc/Monkeydoc/providers/xhtml-provider.cs b/mcs/tools/monkeydoc/Monkeydoc/providers/xhtml-provider.cs
new file mode 100644 (file)
index 0000000..ce74b01
--- /dev/null
@@ -0,0 +1,200 @@
+//
+// A provider that uses Windows help file xhtml TOC files and looks for the
+// referenced documents to create the help source. 
+//
+// Authors:
+// Copyright 2003 Lee Mallabone <gnome@fonicmonkey.net>
+//   Johannes Roith <johannes@roith.de>
+//   Miguel de Icaza <miguel@ximian.com>
+
+using System;
+using System.IO;
+using System.Collections.Generic;
+using System.Text;
+using System.Text.RegularExpressions;
+using System.Xml;
+
+namespace MonkeyDoc.Providers
+{
+       public class XhtmlProvider : Provider
+       {
+               string tocFile;
+       
+               public XhtmlProvider (string handbookTocFile)
+               {
+                       tocFile = handbookTocFile;
+                       if (!File.Exists (tocFile))
+                               throw new FileNotFoundException (String.Format ("The table of contents, `{0}' does not exist", tocFile));               
+               }
+
+               public override void PopulateTree (Tree tree)
+               {
+                       //new SimpleHandbookTOCParser(tree, tocFile);
+                       // TODO: port it
+               }
+
+               public override void CloseTree (HelpSource hs, Tree tree)
+               {
+               }
+       }
+
+       public class XhtmlHelpSource : HelpSource
+       {
+               public XhtmlHelpSource (string base_file, bool create) : base (base_file, create)
+               {
+
+               }
+
+               const string XhtmlPrefix = "xhtml:";
+
+               protected override string UriPrefix {
+                       get {
+                               return XhtmlPrefix;
+                       }
+               }
+               
+               public override DocumentType GetDocumentTypeForId (string id, out Dictionary<string, string> extraArgs)
+               {
+                       extraArgs = null;
+                       return id == "root:" ? DocumentType.TocXml : DocumentType.MonoBook;
+               }
+
+               public override bool IsGeneratedContent (string id)
+               {
+                       return id == "root:";
+               }
+       
+               public override string GetText (string url)
+               {
+                       return TreeDumper.ExportToTocXml (Tree.RootNode, "Mono Handbook", string.Empty);
+               }
+
+               public static string GetAbsoluteLink(string target, string url)
+               {
+                       
+                       string value = null;
+               
+                       if (target.StartsWith ("#") ||
+                           target.StartsWith ("T:") ||
+                           target.StartsWith ("M:") ||
+                           target.StartsWith ("P:") ||
+                           target.StartsWith ("T:") ||
+                           target.StartsWith ("E:") ||
+                           target.StartsWith ("F:") ||
+                           target.StartsWith ("O:") ||
+                           target.StartsWith ("N:") ||
+                           target.StartsWith ("api:"))
+                               return null;
+               
+                       int endp = target.IndexOf(':');
+               
+                       if (endp == -1)
+                               endp = 0;
+                       string protocol = target.Substring(0, endp);
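+                       // For known external protocols, value stays null, which tells
+                       // RewriteLinks to leave the href untouched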
+                       switch (protocol) {
+                       case "mailto": 
+                       case "http":
+                       case "https":
+                       case "ftp":
+                       case "news":
+                       case "irc":
+                               break;
+                       default:
+                               // handle absolute urls like: /html/en/images/empty.png
+                               if (!target.StartsWith("/")) {
+                               
+                                       // url is something like "gnome/bindings/mono.html"
+                                       // This will get the path "gnome/bindings"
+                               
+                                       int slash = url.LastIndexOf ("/");
+                                       string tmpurl = url;
+                               
+                                       if (slash != -1)
+                                               tmpurl  = url.Substring(0, slash);
+                               
+                                       // Count "../" in target and go one level down
+                                       // for each in tmpurl, eventually, then remove "../".
+                               
+                                       Regex reg1 = new Regex("../");
+                                       MatchCollection matches = reg1.Matches(target);
+                               
+                                       for(int i = 1; i < matches.Count; i++) {
+                                               slash = tmpurl.LastIndexOf ("/");
+                                               if (slash != -1) 
+                                                       tmpurl  = tmpurl.Substring(0, slash);
+                                       }
+                               
+                                       target = target.Replace("../", "");
+                               
+                                       value = tmpurl + "/" + target;
+                               
+                               } else {
+                                       value = target.Substring(1, target.Length - 1);
+                               }
+                               break;
+                       }
+                       return value;
+               }
+       
+               XmlDocument RewriteLinks(XmlDocument docToProcess, string url)
+               {
+                       XmlNodeList nodeList = docToProcess.GetElementsByTagName("a");
+               
+                       foreach(XmlNode node in nodeList) {
+                       
+                               XmlElement element = (XmlElement) node;
+                       
+                               if (element.HasAttribute("href") ){
+                               
+                                       XmlAttribute href = element.GetAttributeNode("href");
+                                       string target = href.Value;
+                               
+                                       target = GetAbsoluteLink(target, url);
+                                       if (target != null) {
+                                               string newtarget = String.Format ("source-id:{0}:xhtml:{1}", SourceID, target);
+                                               href.Value = newtarget;
+                                       }
+                               }
+                       }
+
+                       nodeList = docToProcess.GetElementsByTagName("img");
+
+                       foreach(XmlNode node in nodeList) {
+                                                                                                                                    
+                               XmlElement element = (XmlElement) node;
+                                                                                                                                    
+                               if (element.HasAttribute("src") ){
+                                                                                                                                    
+                                       XmlAttribute href = element.GetAttributeNode("src");
+                                       string target = href.Value;
+                                                                                                                                    
+                                       target = GetAbsoluteLink(target, url);
+                                       if (target != null) {
+                                               string newtarget = String.Format ("source-id:{0}:xhtml:{1}", SourceID, target);
+                                               href.Value = newtarget;
+                                       }
+                               }               
+                       }
+
+                       return docToProcess;
+               }
+
+               public override void PopulateIndex (IndexMaker index_maker)
+               {
+                       PopulateIndexFromNodes (Tree.RootNode);
+               }
+
+               void PopulateIndexFromNodes (Node start)
+               {
+                       var nodes = start.Nodes;
+               
+                       if (nodes == null)
+                               Console.WriteLine ("Leaf: " + start.Caption);
+                       else {
+                               Console.WriteLine ("Root: " + start.Caption);
+                               foreach (Node n in nodes)
+                                       PopulateIndexFromNodes (n);
+                       }
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Monkeydoc/storage.cs b/mcs/tools/monkeydoc/Monkeydoc/storage.cs
new file mode 100644 (file)
index 0000000..9085ff5
--- /dev/null
@@ -0,0 +1,53 @@
+using System;
+using System.IO;
+using System.Collections.Generic;
+
+namespace MonkeyDoc
+{
+       // Define a storage mechanism for a help source
+       public interface IDocStorage : IDisposable
+       {
+               // Tells whether the storage can keep successive changes to a doc as revisions
+               bool SupportRevision { get; }
+               IDocRevisionManager RevisionManager { get; }
+
+               // Tells whether the storage supports modifying existing data
+               bool SupportChange { get; }
+
+               /* Stores data inside the storage backend.
+                * If SupportChange is false and the user tries to store something with an
+                * existing id, an exception is thrown.
+                * If id is null or empty, the storage tries to create an automatic id. In
+                * all cases, the id that was used to store the content is returned by the
+                * method.
+                */
+               string Store (string id, string text);
+               string Store (string id, byte[] data);
+               string Store (string id, Stream stream);
+
+               Stream Retrieve (string id);
+
+               IEnumerable<string> GetAvailableIds ();
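+
+               /* Example usage (hypothetical):
+                *   string id = storage.Store (null, someStream); // let the backend pick an id
+                *   using (var stored = storage.Retrieve (id))
+                *           ... // consume the stored content
+                */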
+       }
+
+       public interface IDocRevisionManager
+       {
+               Stream RetrieveWithRevision (string id, string revision);
+
+               // This should be ordered by most recent first
+               IEnumerable<string> AvailableRevisionsForId (string id);
+               // This can simply be implemented with the above method, but it can also
+               // return a symbolic revision value like "HEAD"
+               string LatestRevisionForId (string id);
+
+               // A commit message for instance
+               string GetRevisionDescription (string revision);
+       }
+
+       public static class DocRevisionManagerExtensions
+       {
+               public static Stream RetrieveLatestRevision (this IDocRevisionManager revManager, string id)
+               {
+                       return revManager.RetrieveWithRevision (id, revManager.LatestRevisionForId (id));
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Monkeydoc/storage/ZipStorage.cs b/mcs/tools/monkeydoc/Monkeydoc/storage/ZipStorage.cs
new file mode 100644 (file)
index 0000000..1596d91
--- /dev/null
@@ -0,0 +1,100 @@
+using System;
+using System.IO;
+using System.Xml;
+using System.Linq;
+using System.Text;
+using System.Collections.Generic;
+
+using ICSharpCode.SharpZipLib.Zip;
+
+namespace MonkeyDoc.Storage
+{
+       public class ZipStorage : IDocStorage
+       {
+               ZipOutputStream zipOutput;
+               string zipFileName;
+               ZipFile zipFile;
+               int code;
+
+               public ZipStorage (string zipFileName)
+               {
+                       this.zipFileName = zipFileName;
+                       this.zipFile = new ZipFile (zipFileName);
+               }
+
+               public bool SupportRevision {
+                       get {
+                               return false;
+                       }
+               }
+
+               public IDocRevisionManager RevisionManager {
+                       get {
+                               return null;
+                       }
+               }
+
+               public bool SupportChange {
+                       get {
+                               return true;
+                       }
+               }
+
+               public string Store (string id, string text)
+               {
+                       SetupEntry (ref id);
+                       // Don't dispose the writer: that would close the underlying zip
+                       // stream and break any subsequent Store call
+                       var writer = new StreamWriter (zipOutput);
+                       writer.Write (text);
+                       writer.Flush ();
+                       return id;
+               }
+
+               public string Store (string id, byte[] data)
+               {
+                       SetupEntry (ref id);
+                       zipOutput.Write (data, 0, data.Length);
+                       return id;
+               }
+
+               public string Store (string id, Stream stream)
+               {
+                       SetupEntry (ref id);
+                       stream.CopyTo (zipOutput);
+                       return id;
+               }
+
+               void SetupEntry (ref string id)
+               {
+                       if (string.IsNullOrEmpty (id))
+                               id = GetNewCode ();
+
+                       // Lazily open the output stream on first write (this assumes a given
+                       // ZipStorage instance is used either for reading or for writing, not both)
+                       if (zipOutput == null)
+                               zipOutput = new ZipOutputStream (File.Create (zipFileName));
+
+                       ZipEntry entry = new ZipEntry (id);
+                       zipOutput.PutNextEntry (entry);
+               }
+
+               public Stream Retrieve (string id)
+               {
+                       ZipEntry entry = zipFile.GetEntry (id);
+                       if (entry != null)
+                               return zipFile.GetInputStream (entry);
+                       else
+                               throw new ArgumentException ("id", string.Format ("'{0}' isn't a valid id for this storage", id));
+               }
+
+               public IEnumerable<string> GetAvailableIds ()
+               {
+                       return zipFile.Cast<ZipEntry> ().Select (ze => ze.Name);
+               }
+
+               public void Dispose ()
+               {
+                       if (zipOutput != null)
+                               zipOutput.Dispose ();
+                       zipFile.Close ();
+               }
+
+               string GetNewCode ()
+               {
+                       return String.Format ("{0}", code++);
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Mono.Documentation/ManifestResourceResolver.cs b/mcs/tools/monkeydoc/Mono.Documentation/ManifestResourceResolver.cs
new file mode 100644 (file)
index 0000000..402d137
--- /dev/null
@@ -0,0 +1,42 @@
+using System;
+using System.IO;
+using System.Reflection;
+using System.Xml;
+
+namespace Mono.Documentation {
+       public class ManifestResourceResolver : XmlUrlResolver {
+               private string[] dirs;
+
+               public ManifestResourceResolver (params string[] dirs)
+               {
+                       this.dirs = (string[]) dirs.Clone ();
+               }
+
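+               // Resolution order: embedded manifest resources first, then the
+               // supplied directories, then the default XmlUrlResolver behavior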
+               public override Uri ResolveUri (Uri baseUri, string relativeUri)
+               {
+                       if (Array.IndexOf (
+                                       Assembly.GetExecutingAssembly ().GetManifestResourceNames (),
+                                       relativeUri) >= 0)
+                               return new Uri ("x-resource:///" + relativeUri);
+                       foreach (var dir in dirs) {
+                               if (File.Exists (Path.Combine (dir, relativeUri)))
+                                       return base.ResolveUri (new Uri ("file://" + new DirectoryInfo (dir).FullName + "/"), 
+                                                       relativeUri);
+                       }
+                       return base.ResolveUri (baseUri, relativeUri);
+               }
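+
+               // Resolution order (illustrative): an embedded manifest resource wins,
+               // then a file found in one of the search directories, and finally the
+               // stock XmlUrlResolver behaviour. Assuming "mdoc-html-utils.xsl" is
+               // embedded in this assembly:
+               //
+               //     var resolver = new ManifestResourceResolver ("/tmp");
+               //     resolver.ResolveUri (null, "mdoc-html-utils.xsl");
+               //     // -> x-resource:///mdoc-html-utils.xsl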
+
+               public override object GetEntity (Uri absoluteUri, string role, Type ofObjectToReturn)
+               {
+                       if (ofObjectToReturn == null)
+                               ofObjectToReturn = typeof(Stream);
+                       if (ofObjectToReturn != typeof(Stream))
+                               throw new XmlException ("This object type is not supported.");
+                       if (absoluteUri.Scheme != "x-resource")
+                               return base.GetEntity (absoluteUri, role, ofObjectToReturn);
+                       return Assembly.GetExecutingAssembly().GetManifestResourceStream (
+                                       absoluteUri.Segments [1]);
+               }
+       }
+}
+
diff --git a/mcs/tools/monkeydoc/Mono.Documentation/XmlDocUtils.cs b/mcs/tools/monkeydoc/Mono.Documentation/XmlDocUtils.cs
new file mode 100644 (file)
index 0000000..2f4cd08
--- /dev/null
@@ -0,0 +1,200 @@
+using System;
+using System.Collections;
+using System.IO;
+using System.Text;
+using System.Text.RegularExpressions;
+using System.Web;
+using System.Xml;
+
+namespace Mono.Documentation {
+
+       public delegate XmlDocument DocLoader (string escapedTypeName);
+
+       public static class XmlDocUtils
+       {
+               public static XmlNodeList GetMemberGenericParameters (XmlNode member)
+               {
+                       return member.SelectNodes ("Docs/typeparam");
+               }
+
+               public static XmlNodeList GetTypeGenericParameters (XmlNode member)
+               {
+                       return member.SelectNodes ("/Type/TypeParameters/TypeParameter");
+               }
+
+               public static string ToTypeName (string type, XmlNode member)
+               {
+                       return ToTypeName (type, GetTypeGenericParameters (member), 
+                                       GetMemberGenericParameters (member));
+               }
+
+               public static string ToTypeName (string type, XmlNodeList typeGenParams, XmlNodeList memberGenParams)
+               {
+                       type = type.Replace ("&", "@").Replace ("<", "{").Replace (">", "}");
+                       for (int i = 0; i < typeGenParams.Count; ++i) {
+                               string name = typeGenParams [i].InnerText;
+                               type = Regex.Replace (type, @"\b" + name + @"\b", "`" + i);
+                       }
+                       for (int i = 0; i < memberGenParams.Count; ++i) {
+                               string name = memberGenParams [i].Attributes ["name"].Value;
+                               type = Regex.Replace (type, @"\b" + name + @"\b", "``" + i);
+                       }
+                       return type;
+               }
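+
+               // Example (illustrative): for a type declaring generic parameters
+               // TKey and TValue, ToTypeName ("IEnumerator<TValue>", type, member)
+               // yields "IEnumerator{`1}": generic parameters become positional
+               // `N (type) or ``N (member) references.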
+
+               public static string ToEscapedTypeName (string name)
+               {
+                       return GetCountedName (name, "`");
+               }
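+
+               // Examples: ToEscapedTypeName ("IDictionary<K,V>") -> "IDictionary`2";
+               // only top-level arguments are counted, so
+               // ToEscapedTypeName ("Foo<List<int>,string>") -> "Foo`2".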
+
+               private static string GetCountedName (string name, string escape)
+               {
+                       int lt = name.IndexOf ("<");
+                       if (lt == -1)
+                               return name;
+                       StringBuilder type = new StringBuilder (name.Length);
+                       int start = 0;
+                       do {
+                               type.Append (name.Substring (start, lt - start));
+                               type.Append (escape);
+                               type.Append (GetGenericCount (name, lt, out start));
+                       } while ((lt = name.IndexOf ('<', start)) >= 0);
+                       if (start < name.Length)
+                               type.Append (name.Substring (start));
+                       return type.ToString ().Replace ("+", ".");
+               }
+
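+               // Counts the top-level generic arguments between the '<' at 'start'
+               // and its matching '>'; 'end' is set to the index just past that '>'.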
+               private static int GetGenericCount (string name, int start, out int end)
+               {
+                       int n = 1;
+                       bool r = true;
+                       int i = start;
+                       int depth = 1;
+                       for ( ++i; r && i < name.Length; ++i) {
+                               switch (name [i]) {
+                                       case ',': if (depth == 1) ++n; break;
+                                       case '<': ++depth; break;
+                                       case '>': --depth; if (depth == 0) r = false; break;
+                               }
+                       }
+                       end = i;
+                       return n;
+               }
+
+               public static string ToEscapedMemberName (string member)
+               {
+                       // Explicitly implemented interface members contain '.'s in the member
+                       // name, e.g. System.Collections.Generic.IEnumerable<A>.GetEnumerator.
+                       // CSC does a s/\./#/g for these.
+                       member = member.Replace (".", "#");
+                       if (member [member.Length-1] == '>') {
+                               int i = member.LastIndexOf ("<");
+                               int ignore;
+                               return member.Substring (0, i).Replace ("<", "{").Replace (">", "}") + 
+                                       "``" + GetGenericCount (member, i, out ignore);
+                       }
+                       return member.Replace ("<", "{").Replace (">", "}");
+               }
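+
+               // Examples: ToEscapedMemberName ("ConvertAll<TOutput>") -> "ConvertAll``1";
+               // ToEscapedMemberName ("System.Collections.Generic.IEnumerable<A>.GetEnumerator")
+               // -> "System#Collections#Generic#IEnumerable{A}#GetEnumerator".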
+
+               public static void AddExtensionMethods (XmlDocument typexml, ArrayList/*<XmlNode>*/ extensions, DocLoader loader)
+               {
+                       // if no members (enum, delegate) don't add extensions
+                       XmlNode m = typexml.SelectSingleNode ("/Type/Members");
+                       if (m == null)
+                               return;
+
+                       // static classes can't be targets:
+                       if (typexml.SelectSingleNode (
+                                               "/Type/TypeSignature[@Language='C#']/@Value")
+                                       .Value.IndexOf (" static ") >= 0)
+                               return;
+
+                       foreach (string s in GetSupportedTypes (typexml, loader)) {
+                               foreach (XmlNode extension in extensions) {
+                                       bool add = false;
+                                       foreach (XmlNode target in extension.SelectNodes ("Targets/Target")) {
+                                               if (target.Attributes ["Type"].Value == s) {
+                                                       add = true;
+                                                       break;
+                                               }
+                                       }
+                                       if (!add) {
+                                               continue;
+                                       }
+                                       foreach (XmlNode c in extension.SelectNodes ("Member")) {
+                                               XmlNode cm = typexml.ImportNode (c, true);
+                                               m.AppendChild (cm);
+                                       }
+                               }
+                       }
+               }
+
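+               // Yields every name an extension method target may match for this
+               // type: System.Object, the type itself, each base type reachable
+               // through the loader, and all implemented interfaces.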
+               private static IEnumerable GetSupportedTypes (XmlDocument type, DocLoader loader)
+               {
+                       yield return "System.Object";
+                       yield return GetEscapedPath (type, "Type/@FullName");
+
+                       Hashtable h = new Hashtable ();
+                       GetInterfaces (h, type, loader);
+
+                       string s = GetEscapedPath (type, "Type/Base/BaseTypeName");
+                       if (s != null) {
+                               yield return s;
+                               XmlDocument d;
+                               string p = s;
+                               while (s != null && (d = loader (s)) != null) {
+                                       GetInterfaces (h, d, loader);
+                                       s = GetEscapedPath (d, "Type/Base/BaseTypeName");
+                                       if (s == null || p == s)
+                                               break;
+                                       yield return s;
+                               }
+                       }
+
+                       foreach (object o in h.Keys)
+                               yield return o.ToString ();
+               }
+
+               private static string GetEscapedPath (XmlDocument d, string path)
+               {
+                       XmlNode n = d.SelectSingleNode (path);
+                       if (n == null)
+                               return null;
+                       return "T:" + ToEscapedTypeName (n.InnerText);
+               }
+
+               private static void GetInterfaces (Hashtable ifaces, XmlDocument doc, DocLoader loader)
+               {
+                       foreach (XmlNode n in doc.SelectNodes ("Type/Interfaces/Interface/InterfaceName")) {
+                               string t = ToEscapedTypeName (n.InnerText);
+                               string tk = "T:" + t;
+                               if (!ifaces.ContainsKey (tk)) {
+                                       ifaces.Add (tk, null);
+                                       try {
+                                               XmlDocument d = loader (t);
+                                               if (d != null)
+                                                       GetInterfaces (ifaces, d, loader);
+                                       }
+                                       catch (FileNotFoundException) {
+                                               // Ignore; the interface documentation couldn't be found.
+                                       }
+                               }
+                       }
+               }
+
+               // Turns e.g. sources/netdocs into sources/cache/netdocs
+               public static string GetCacheDirectory (string assembledBase)
+               {
+                       return Path.Combine (
+                                               Path.Combine (Path.GetDirectoryName (assembledBase), "cache"),
+                                               Path.GetFileName (assembledBase));
+               }
+
+               public static string GetCachedFileName (string cacheDir, string url)
+               {
+                       return Path.Combine (cacheDir,
+                                            Uri.EscapeUriString (url).Replace ('/', '+').Replace ("*", "%2a"));
+               }
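+
+               // Example (assuming .NET's Uri.EscapeUriString leaves ':' and '*'
+               // untouched): GetCachedFileName ("sources/cache/netdocs", "T:System.String/*")
+               // -> "sources/cache/netdocs/T:System.String+%2a".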
+       }
+}
+
diff --git a/mcs/tools/monkeydoc/Mono.Utilities/LRUCache.cs b/mcs/tools/monkeydoc/Mono.Utilities/LRUCache.cs
new file mode 100644 (file)
index 0000000..e479a96
--- /dev/null
@@ -0,0 +1,92 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using System.Collections.Generic;
+
+namespace Mono.Utilities
+{
+    public class LRUCache<TKey, TValue>
+    {
+           [ThreadStatic]
+           static LRUCache<TKey, TValue> deflt;
+
+           public static LRUCache<TKey, TValue> Default {
+                   get {
+                           return deflt != null ? deflt : (deflt = new LRUCache<TKey, TValue> (5));
+                   }
+           }
+
+        int capacity;
+        LinkedList<ListValueEntry<TKey, TValue>> list;
+        Dictionary<TKey, LinkedListNode<ListValueEntry<TKey, TValue>>> lookup;
+        LinkedListNode<ListValueEntry<TKey, TValue>> openNode;
+
+        public LRUCache (int capacity)
+        {
+            this.capacity = capacity;
+            this.list = new LinkedList<ListValueEntry<TKey, TValue>>();
+            this.lookup = new Dictionary<TKey, LinkedListNode<ListValueEntry<TKey, TValue>>> (capacity + 1);
+            this.openNode = new LinkedListNode<ListValueEntry<TKey, TValue>>(new ListValueEntry<TKey, TValue> (default(TKey), default(TValue)));
+        }
+
+        public void Put (TKey key, TValue value)
+        {
+            if (Get(key) == null) {
+                this.openNode.Value.ItemKey = key;
+                this.openNode.Value.ItemValue = value;
+                this.list.AddFirst (this.openNode);
+                this.lookup.Add (key, this.openNode);
+
+                if (this.list.Count > this.capacity) {
+                    // last node is to be removed and saved for the next addition to the cache
+                    this.openNode = this.list.Last;
+
+                    // remove from list & dictionary
+                    this.list.RemoveLast();
+                    this.lookup.Remove(this.openNode.Value.ItemKey);
+                } else {
+                    // still filling the cache, create a new open node for the next time
+                    this.openNode = new LinkedListNode<ListValueEntry<TKey, TValue>>(new ListValueEntry<TKey, TValue>(default(TKey), default(TValue)));
+                }
+            }
+        }
+
+        public TValue Get (TKey key)
+        {
+            LinkedListNode<ListValueEntry<TKey, TValue>> node = null;
+            if (!this.lookup.TryGetValue (key, out node))
+                return default (TValue);
+            this.list.Remove (node);
+            this.list.AddFirst (node);
+            return node.Value.ItemValue;
+        }
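+
+        // Illustrative usage: a two-entry cache; Get refreshes an entry's
+        // recency, Put evicts the least recently used entry when full.
+        //
+        //     var cache = new LRUCache<string, string> (2);
+        //     cache.Put ("a", "1");
+        //     cache.Put ("b", "2");
+        //     cache.Get ("a");       // "a" is now most recently used
+        //     cache.Put ("c", "3");  // evicts "b"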
+
+        class ListValueEntry<K, V> where K : TKey 
+                                   where V : TValue
+        {
+            internal V ItemValue;
+            internal K ItemKey;
+
+            internal ListValueEntry(K key, V value)
+            {
+                this.ItemKey = key;
+                this.ItemValue = value;
+            }
+        }
+    }
+}
diff --git a/mcs/tools/monkeydoc/Mono.Utilities/MemoryLRU.cs b/mcs/tools/monkeydoc/Mono.Utilities/MemoryLRU.cs
new file mode 100644 (file)
index 0000000..8b74861
--- /dev/null
@@ -0,0 +1,92 @@
+/* 
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+using System;
+using System.Collections.Generic;
+
+namespace Mono.Utilities
+{
+    public class LRUCache<TKey, TValue>
+    {
+           [ThreadStatic]
+           static LRUCache<TKey, TValue> deflt;
+
+           public static LRUCache<TKey, TValue> Default {
+                   get {
+                           return deflt != null ? deflt : (deflt = new LRUCache<TKey, TValue> (5));
+                   }
+           }
+
+        int capacity;
+        LinkedList<ListValueEntry<TKey, TValue>> list;
+        Dictionary<TKey, LinkedListNode<ListValueEntry<TKey, TValue>>> lookup;
+        LinkedListNode<ListValueEntry<TKey, TValue>> openNode;
+
+        public LRUCache (int capacity)
+        {
+            this.capacity = capacity;
+            this.list = new LinkedList<ListValueEntry<TKey, TValue>>();
+            this.lookup = new Dictionary<TKey, LinkedListNode<ListValueEntry<TKey, TValue>>> (capacity + 1);
+            this.openNode = new LinkedListNode<ListValueEntry<TKey, TValue>>(new ListValueEntry<TKey, TValue> (default(TKey), default(TValue)));
+        }
+
+        public void Put (TKey key, TValue value)
+        {
+            if (Get(key) == null) {
+                this.openNode.Value.ItemKey = key;
+                this.openNode.Value.ItemValue = value;
+                this.list.AddFirst (this.openNode);
+                this.lookup.Add (key, this.openNode);
+
+                if (this.list.Count > this.capacity) {
+                    // last node is to be removed and saved for the next addition to the cache
+                    this.openNode = this.list.Last;
+
+                    // remove from list & dictionary
+                    this.list.RemoveLast();
+                    this.lookup.Remove(this.openNode.Value.ItemKey);
+                } else {
+                    // still filling the cache, create a new open node for the next time
+                    this.openNode = new LinkedListNode<ListValueEntry<TKey, TValue>>(new ListValueEntry<TKey, TValue>(default(TKey), default(TValue)));
+                }
+            }
+        }
+
+        public TValue Get (TKey key)
+        {
+            LinkedListNode<ListValueEntry<TKey, TValue>> node = null;
+            if (!this.lookup.TryGetValue (key, out node))
+                return default (TValue);
+            this.list.Remove (node);
+            this.list.AddFirst (node);
+            return node.Value.ItemValue;
+        }
+
+        class ListValueEntry<K, V> where K : TKey 
+                                   where V : TValue
+        {
+            internal V ItemValue;
+            internal K ItemKey;
+
+            internal ListValueEntry(K key, V value)
+            {
+                this.ItemKey = key;
+                this.ItemValue = value;
+            }
+        }
+    }
+}
diff --git a/mcs/tools/monkeydoc/Mono.Utilities/colorizer.cs b/mcs/tools/monkeydoc/Mono.Utilities/colorizer.cs
new file mode 100644 (file)
index 0000000..7444624
--- /dev/null
@@ -0,0 +1,171 @@
+using System;
+using System.Text.RegularExpressions;
+using System.Collections;
+
+namespace Mono.Utilities {
+       public class Colorizer {
+               //
+               // Syntax coloring
+               //
+
+               static string keywords_cs =
+                       "(\\babstract\\b|\\bevent\\b|\\bnew\\b|\\bstruct\\b|\\bas\\b|\\bexplicit\\b|\\bnull\\b|\\bswitch\\b|\\bbase\\b|\\bextern\\b|"
+                       +
+                       "\\bobject\\b|\\bthis\\b|\\bbool\\b|\\bfalse\\b|\\boperator\\b|\\bthrow\\b|\\bbreak\\b|\\bfinally\\b|\\bout\\b|\\btrue\\b|"
+                       +
+                       "\\bbyte\\b|\\bfixed\\b|\\boverride\\b|\\btry\\b|\\bcase\\b|\\bfloat\\b|\\bparams\\b|\\btypeof\\b|\\bcatch\\b|\\bfor\\b|"
+                       +
+                       "\\bprivate\\b|\\buint\\b|\\bchar\\b|\\bforeach\\b|\\bprotected\\b|\\bulong\\b|\\bchecked\\b|\\bgoto\\b|\\bpublic\\b|"
+                       +
+                       "\\bunchecked\\b|\\bclass\\b|\\bif\\b|\\breadonly\\b|\\bunsafe\\b|\\bconst\\b|\\bimplicit\\b|\\bref\\b|\\bushort\\b|"
+                       +
+                       "\\bcontinue\\b|\\bin\\b|\\breturn\\b|\\busing\\b|\\bdecimal\\b|\\bint\\b|\\bsbyte\\b|\\bvirtual\\b|\\bdefault\\b|"
+                       +
+                       "\\binterface\\b|\\bsealed\\b|\\bvolatile\\b|\\bdelegate\\b|\\binternal\\b|\\bshort\\b|\\bvoid\\b|\\bdo\\b|\\bis\\b|"
+                       +
+                       "\\bsizeof\\b|\\bwhile\\b|\\bdouble\\b|\\block\\b|\\bstackalloc\\b|\\belse\\b|\\blong\\b|\\bstatic\\b|\\benum\\b|"
+                       + "\\bnamespace\\b|\\bstring\\b)";
+
+#if false
+// currently not in use
+               static string keywords_vb =
+                       "(\\bAddHandler\\b|\\bAddressOf\\b|\\bAlias\\b|\\bAnd\\b|\\bAndAlso\\b|\\bAnsi\\b|\\bAs\\b|\\bAssembly\\b|"
+                       +
+                       "\\bAuto\\b|\\bBoolean\\b|\\bByRef\\b|\\bByte\\b|\\bByVal\\b|\\bCall\\b|\\bCase\\b|\\bCatch\\b|"
+                       +
+                       "\\bCBool\\b|\\bCByte\\b|\\bCChar\\b|\\bCDate\\b|\\bCDec\\b|\\bCDbl\\b|\\bChar\\b|\\bCInt\\b|"
+                       +
+                       "\\bClass\\b|\\bCLng\\b|\\bCObj\\b|\\bConst\\b|\\bCShort\\b|\\bCSng\\b|\\bCStr\\b|\\bCType\\b|"
+                       +
+                       "\\bDate\\b|\\bDecimal\\b|\\bDeclare\\b|\\bDefault\\b|\\bDelegate\\b|\\bDim\\b|\\bDirectCast\\b|\\bDo\\b|"
+                       +
+                       "\\bDouble\\b|\\bEach\\b|\\bElse\\b|\\bElseIf\\b|\\bEnd\\b|\\bEnum\\b|\\bErase\\b|\\bError\\b|"
+                       +
+                       "\\bEvent\\b|\\bExit\\b|\\bFalse\\b|\\bFinally\\b|\\bFor\\b|\\bFriend\\b|\\bFunction\\b|\\bGet\\b|"
+                       +
+                       "\\bGetType\\b|\\bGoSub\\b|\\bGoTo\\b|\\bHandles\\b|\\bIf\\b|\\bImplements\\b|\\bImports\\b|\\bIn\\b|"
+                       +
+                       "\\bInherits\\b|\\bInteger\\b|\\bInterface\\b|\\bIs\\b|\\bLet\\b|\\bLib\\b|\\bLike\\b|\\bLong\\b|"
+                       +
+                       "\\bLoop\\b|\\bMe\\b|\\bMod\\b|\\bModule\\b|\\bMustInherit\\b|\\bMustOverride\\b|\\bMyBase\\b|\\bMyClass\\b|"
+                       +
+                       "\\bNamespace\\b|\\bNew\\b|\\bNext\\b|\\bNot\\b|\\bNothing\\b|\\bNotInheritable\\b|\\bNotOverridable\\b|\\bObject\\b|"
+                       +
+                       "\\bOn\\b|\\bOption\\b|\\bOptional\\b|\\bOr\\b|\\bOrElse\\b|\\bOverloads\\b|\\bOverridable\\b|\\bOverrides\\b|"
+                       +
+                       "\\bParamArray\\b|\\bPreserve\\b|\\bPrivate\\b|\\bProperty\\b|\\bProtected\\b|\\bPublic\\b|\\bRaiseEvent\\b|\\bReadOnly\\b|"
+                       +
+                       "\\bReDim\\b|\\bREM\\b|\\bRemoveHandler\\b|\\bResume\\b|\\bReturn\\b|\\bSelect\\b|\\bSet\\b|\\bShadows\\b|"
+                       +
+                       "\\bShared\\b|\\bShort\\b|\\bSingle\\b|\\bStatic\\b|\\bStep\\b|\\bStop\\b|\\bString\\b|\\bStructure\\b|"
+                       +
+                       "\\bSub\\b|\\bSyncLock\\b|\\bThen\\b|\\bThrow\\b|\\bTo\\b|\\bTrue\\b|\\bTry\\b|\\bTypeOf\\b|"
+                       +
+                       "\\bUnicode\\b|\\bUntil\\b|\\bVariant\\b|\\bWhen\\b|\\bWhile\\b|\\bWith\\b|\\bWithEvents\\b|\\bWriteOnly\\b|\\bXor\\b)";
+#endif
+       
+               public static string Colorize(string text, string lang)
+               {
+                       lang = lang.Trim().ToLower();
+                       switch (lang) {
+                       case "xml":
+                               return ColorizeXml(text);
+                       case "cs": case "c#": case "csharp":
+                               return ColorizeCs(text);
+                       case "vb":
+                               return ColorizeVb(text);
+                       }
+                       return Escape (text);
+               }
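+
+               // Example (illustrative): Colorize ("int x; // count\n", "cs") wraps
+               // the keyword in <font color="blue">, the //-comment (which must end
+               // with a newline to match) in <font color="green">, and turns spaces
+               // and newlines into &nbsp; and <br/>.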
+
+               static string ColorizeXml(string text)
+               {
+                       // Order is highly important.
+
+                       // s/ /&nbsp;/g must be first, as later substitutions add required spaces
+                       text = text.Replace(" ", "&nbsp;");
+
+                       // Find & mark XML elements
+                       Regex re = new Regex("<\\s*(\\/?)\\s*([\\s\\S]*?)\\s*(\\/?)\\s*>");
+                       text = re.Replace(text, "{blue:&lt;$1}{maroon:$2}{blue:$3&gt;}");
+
+                       // Colorize attribute strings; must be done before colorizing marked XML
+                       // elements so that we don't clobber the colorized XML tags.
+                       re = new Regex ("([\"'])(.*?)\\1");
+                       text = re.Replace (text, 
+                                       "$1<font color=\"purple\">$2</font>$1");
+
+                       // Colorize marked XML elements
+                       re = new Regex("\\{(\\w*):([\\s\\S]*?)\\}");
+                       //text = re.Replace(text, "<span style='color:$1'>$2</span>");
+                       text = re.Replace(text, "<font color=\"$1\">$2</font>");
+
+                       // Standard Structure
+                       text = text.Replace("\t", "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;");
+                       re = new Regex("\r\n|\r|\n");
+                       text = re.Replace(text, "<br/>");
+
+                       return text;
+               }
+
+               static string ColorizeCs(string text)
+               {
+                       // Escape '&' before any entities are inserted so they aren't mangled.
+                       text = text.Replace("&", "&amp;");
+                       text = text.Replace(" ", "&nbsp;");
+
+                       text = text.Replace("<", "&lt;");
+                       text = text.Replace(">", "&gt;");
+
+                       Regex re = new Regex("\"((((?!\").)|\\\")*?)\"");
+
+                       text =
+                               re.Replace(text,
+                                               "<font color=\"purple\">\"$1\"</font>");
+                                               //"<span style='color:purple'>\"$1\"</span>");
+
+                       re = new
+                               Regex
+                               ("//(((.(?!\"</font>))|\"(((?!\").)*)\"</font>)*)(\r|\n|\r\n)");
+                               //("//(((.(?!\"</span>))|\"(((?!\").)*)\"</span>)*)(\r|\n|\r\n)");
+                       text =
+                               re.Replace(text,
+                                               "<font color=\"green\">//$1</font><br/>");
+                                       //      "<span style='color:green'>//$1</span><br/>");
+
+                       re = new Regex(keywords_cs);
+                       text = re.Replace(text, "<font color=\"blue\">$1</font>");
+                       //text = re.Replace(text, "<span style='color:blue'>$1</span>");
+
+                       text = text.Replace("\t", "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;");
+                       text = text.Replace("\n", "<br/>");
+
+                       return text;
+               }
+
+               static string ColorizeVb(string text) {
+                       text = text.Replace(" ", "&nbsp;");
+
+                       /*      Regex re = new Regex ("\"((((?!\").)|\\\")*?)\"");
+                               text = re.Replace (text,"<span style='color:purple'>\"$1\"</span>");
+
+                               re = new Regex ("'(((.(?!\"\\<\\/span\\>))|\"(((?!\").)*)\"\\<\\/span\\>)*)(\r|\n|\r\n)");
+                               text = re.Replace (text,"<span style='color:green'>//$1</span><br/>");
+
+                               re = new Regex (keywords_vb);
+                               text = re.Replace (text,"<span style='color:blue'>$1</span>");
+                        */
+                       text = text.Replace("\t", "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;");
+                       text = text.Replace("\n", "<br/>");
+                       return text;
+               }
+
+               static string Escape(string text)
+               {
+                       text = text.Replace("&", "&amp;");
+                       text = text.Replace(" ", "&nbsp;");
+                       text = text.Replace("<", "&lt;");
+                       text = text.Replace(">", "&gt;");
+                       text = text.Replace("\n", "<br/>");
+                       return text;
+               }
+       }
+}
diff --git a/mcs/tools/monkeydoc/Resources/.gitattributes b/mcs/tools/monkeydoc/Resources/.gitattributes
new file mode 100644 (file)
index 0000000..bfc0c0d
--- /dev/null
@@ -0,0 +1 @@
+/helper.js -crlf
diff --git a/mcs/tools/monkeydoc/Resources/Lminus.gif b/mcs/tools/monkeydoc/Resources/Lminus.gif
new file mode 100644 (file)
index 0000000..33c49d1
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/Lminus.gif differ
diff --git a/mcs/tools/monkeydoc/Resources/Lplus.gif b/mcs/tools/monkeydoc/Resources/Lplus.gif
new file mode 100644 (file)
index 0000000..9d2a2ac
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/Lplus.gif differ
diff --git a/mcs/tools/monkeydoc/Resources/base.css b/mcs/tools/monkeydoc/Resources/base.css
new file mode 100644 (file)
index 0000000..d264419
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+* base.css: CSS applied to all the docs 
+*
+* Author: Mario Sopena
+*/
+
+body, table {
+       margin: 0px;
+}
+
+body, table, pre, p {
+       font-family: @@FONT_FAMILY@@, sans-serif;
+       /* font-size: @@FONT_SIZE@@pt; */
+       font-size: 10pt;
+}
+
+div.header {
+    background-color: #FAFBFD;
+    font-size: 1.7em;
+    font-weight: bold;
+    padding: 8px 0 0 10px;
+    font-family: 'Segoe UI',Verdana,Arial;
+}
+
+div.title {
+       font-size: 130%;
+       font-weight: bolder;
+       margin-top: 0.3em;
+       margin-left: 0.2em;
+       margin-bottom: 0.1em;
+}
+
+.subtitle {
+       font-style: italic;
+}
+
diff --git a/mcs/tools/monkeydoc/Resources/ecmaspec-html-css.xsl b/mcs/tools/monkeydoc/Resources/ecmaspec-html-css.xsl
new file mode 100644 (file)
index 0000000..d9c4f7b
--- /dev/null
@@ -0,0 +1,98 @@
+<?xml version="1.0"?>
+<xsl:stylesheet 
+       xmlns:xsl="http://www.w3.org/1999/XSL/Transform" 
+       version="1.0"
+       xmlns:monodoc="monodoc:///extensions"
+       exclude-result-prefixes="monodoc"
+       >
+<xsl:output omit-xml-declaration="yes" />
+
+<xsl:template match="/clause">
+       <div class="header" id="ecmaspec">
+               <div class="subtitle">ECMA-334 C# Language Specification</div> 
+               <div class="title"><xsl:value-of select="@number"/>: <xsl:value-of select="@title"/>
+               <xsl:if test="@informative"> (informative) </xsl:if></div>
+       </div>
+       <xsl:apply-templates />
+</xsl:template>
+
+<xsl:template match="paragraph">
+       <p>
+               <xsl:apply-templates />
+       </p>
+</xsl:template>
+
+<xsl:template match="keyword">
+        <span class="keyword"> <xsl:apply-templates/></span> <xsl:text> </xsl:text>
+</xsl:template>
+
+<xsl:template match="hyperlink">
+       <a href="ecmaspec:{.}">
+               <xsl:value-of select="." />
+       </a>
+</xsl:template>
+
+<xsl:template match="list">
+       <ul>
+               <xsl:for-each select="list_item|list">
+                       <li><xsl:apply-templates /></li>
+               </xsl:for-each>
+       </ul>
+</xsl:template>
+
+<xsl:template match="code_example">
+       <div class="code_example">
+          <div class="code_ex_title">Code example</div>
+          <span class="code">
+                 <xsl:value-of select="monodoc:Colorize(string(descendant-or-self::text()), string('csharp'))" disable-output-escaping="yes" />
+          </span>
+       </div>
+</xsl:template>
+
+<xsl:template match="symbol">
+       <span class="symbol">
+               <xsl:apply-templates />
+       </span>
+</xsl:template>
+
+<xsl:template match="grammar_production">
+       <dl class="nt_{name/non_terminal/.}">
+               <dt><xsl:value-of select="name/non_terminal/." /></dt>
+               
+               <xsl:for-each select="rhs">
+               <dd>
+                       <xsl:apply-templates select="node()" />
+               </dd>
+               </xsl:for-each>
+       </dl>
+</xsl:template>
+
+<xsl:template match="non_terminal">
+       <span class="non_terminal"><xsl:text> </xsl:text><xsl:value-of select="." /></span>
+</xsl:template>
+
+<xsl:template match="terminal">
+       <span class="terminal"><xsl:text> </xsl:text><xsl:value-of select="." /></span>
+</xsl:template>
+
+<xsl:template match="opt">
+       <xsl:text> (</xsl:text><span class="opt">optional</span><xsl:text>) </xsl:text>
+</xsl:template>
+
+<xsl:template match="note|example">
+       <div class="note">
+               <xsl:apply-templates />
+       </div>
+</xsl:template>
+
+<xsl:template match="table_line">
+    <xsl:apply-templates /><br />
+</xsl:template>
+
+<xsl:template match="@*|node()">
+       <xsl:copy>
+               <xsl:apply-templates select="@*|node()"/>
+       </xsl:copy>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/mcs/tools/monkeydoc/Resources/ecmaspec-html.xsl b/mcs/tools/monkeydoc/Resources/ecmaspec-html.xsl
new file mode 100644 (file)
index 0000000..631ee03
--- /dev/null
@@ -0,0 +1,96 @@
+<?xml version="1.0"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output omit-xml-declaration="yes" />
+
+<xsl:template match="/clause">
+       <table width="100%" cellpadding="5">
+               <tr bgcolor="#b0c4de"><td>
+               <i>ECMA-334 C# Language Specification</i>
+
+               <h3>
+                       <xsl:value-of select="@number"/>: <xsl:value-of select="@title"/>
+                       
+                       <xsl:if test="@informative">
+                               (informative)
+                       </xsl:if>
+               </h3>
+               </td></tr>
+       </table>
+       
+       <xsl:apply-templates />
+</xsl:template>
+
+<xsl:template match="paragraph">
+       <p>
+               <xsl:apply-templates />
+       </p>
+</xsl:template>
+
+<xsl:template match="keyword">
+        <i> <xsl:apply-templates/></i> <xsl:text> </xsl:text>
+</xsl:template>
+
+<xsl:template match="hyperlink">
+       <a href="ecmaspec:{.}">
+               <xsl:value-of select="." />
+       </a>
+</xsl:template>
+
+<xsl:template match="list">
+       <ul>
+               <xsl:for-each select="list_item|list">
+                       <li><xsl:apply-templates /></li>
+               </xsl:for-each>
+       </ul>
+</xsl:template>
+
+<xsl:template match="code_example">
+  <table bgcolor="#f5f5dd" border="1" cellpadding="5">
+       <tr>
+         <td>
+           <pre>
+                 <xsl:apply-templates />
+           </pre>
+         </td>
+       </tr>
+  </table>
+</xsl:template>
+
+<xsl:template match="symbol">
+       <code>
+               <xsl:apply-templates />
+       </code>
+</xsl:template>
+
+<xsl:template match="grammar_production">
+       <dl id="nt_{name/non_terminal/.}">
+               <dt><xsl:value-of select="name/non_terminal/." /></dt>
+               
+               <xsl:for-each select="rhs">
+               <dd>
+                       <xsl:apply-templates select="node()" />
+               </dd>
+               </xsl:for-each>
+       </dl>
+</xsl:template>
+
+<xsl:template match="non_terminal">
+
+       <code><xsl:text> </xsl:text><xsl:value-of select="." /></code>
+</xsl:template>
+
+<xsl:template match="terminal">
+       <code><xsl:text> </xsl:text><xsl:value-of select="." /></code>
+</xsl:template>
+
+<xsl:template match="opt">
+       <sub>opt</sub>
+</xsl:template>
+
+<xsl:template match="@*|node()">
+       <xsl:copy>
+               <xsl:apply-templates select="@*|node()"/>
+       </xsl:copy>
+</xsl:template>
+
+</xsl:stylesheet>
diff --git a/mcs/tools/monkeydoc/Resources/ecmaspec.css b/mcs/tools/monkeydoc/Resources/ecmaspec.css
new file mode 100644 (file)
index 0000000..341f439
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+* ecmaspec.css: CSS applied to ECMA C# specs
+*
+* Author: Mario Sopena
+*/
+
+#ecmaspec {
+       background: #a4dda4; /*#83b183;*/
+       border: 2px solid #556655;
+}
+
+p {
+       text-align: justify;
+       margin-top: .5em;
+       margin-bottom: .5em;
+}
+
+span.keyword {
+   color: #a6563a;
+}
+
+a:link {
+       text-decoration: none;
+}
+
+a:hover {
+       text-decoration: underline;
+}
+
+div.code_example {
+   background: #f5f5dd;
+   border: 1px solid black;
+   padding-left: 1em;
+   padding-bottom: 1em;
+   margin-top: 1em;
+   font-family: monospace;
+   white-space: pre;
+   margin-bottom: 1em;
+}
+div.code_ex_title {
+   position: relative;
+   top: -1em;
+   left: 30%;
+   background: #cdcd82;
+   border: 1px solid black;
+   color: black;
+   text-transform: uppercase;
+   width: 40%;
+   padding: 0.3em;
+   text-align: center;
+}
+
+span.symbol {
+       font-weight: bolder;
+}
+
+
+span.opt, span.optional {
+       font-style: italic;
+}
+
+div.note {
+   background: #cdcd82;
+   border: 1px solid black;
+   padding: 1em;
+   margin-top: 1em;
+   margin-bottom: 1em;
+}
diff --git a/mcs/tools/monkeydoc/Resources/helper.js b/mcs/tools/monkeydoc/Resources/helper.js
new file mode 100755 (executable)
index 0000000..2889c1b
--- /dev/null
@@ -0,0 +1,12 @@
+function toggle_display (block) {\r
+  var w = document.getElementById (block);\r
+  var t = document.getElementById (block + ":toggle");\r
+  if (w.style.display == "none") {\r
+    w.style.display = "block";\r
+               t.getElementsByTagName("img")[0].setAttribute ("src", "xtree/images/clean/Lminus.gif"); // <img src="xtree/images/clean/Lminus.gif">\r
+  } else {\r
+    w.style.display = "none";\r
+               t.getElementsByTagName("img")[0].setAttribute ("src", "xtree/images/clean/Lplus.gif"); // <img src="xtree/images/clean/Lplus.gif">\r
+  }\r
+}\r
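+// Expected markup (illustrative; the ids are assumptions):\r
+//   <div id="foo" style="display:none">...</div>\r
+//   <a id="foo:toggle" href="javascript:toggle_display('foo')"><img src="xtree/images/clean/Lplus.gif"/></a>\r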
+\r
diff --git a/mcs/tools/monkeydoc/Resources/home.html b/mcs/tools/monkeydoc/Resources/home.html
new file mode 100644 (file)
index 0000000..2d748a7
--- /dev/null
@@ -0,0 +1,69 @@
+<head>
+<style type="text/css">
+/* GENERAL */
+
+body, table {
+       font-family: @@FONT_FAMILY@@, sans-serif;
+       font-size: @@FONT_SIZE@@%;
+}
+
+/* ECMA BLOCK */
+#docs {
+       margin-bottom: 1em;
+}
+
+/* CONTRIBUTIONS */
+#contrib {
+       margin-top: 2em;
+       width: 98%;
+       margin-left: 1%;
+       color: black;
+       background: #fff3f3;
+       border: 1px solid #ffc9c9;
+       @@EDITING_ENABLED@@
+       }
+#contribTitle {
+       text-align: left;
+       font-weight: bold;
+       padding: .4em;
+       font-size: 110%;
+       @@CONTRIB_DISP@@
+}
+#contrib #content {
+       padding: .4em;
+}
+#some-contrib {
+       @@CONTRIB_DISP@@
+}
+#no-contrib {
+       @@NO_CONTRIB_DISP@@
+}
+#contrib p {
+       text-indent: 1em;
+       text-align: justify;
+       }
+</style>
+<link type="text/css" rel="stylesheet" href="mono-ecma.css"/>
+</head>
+
+<div class="Content">
+  <p>The following documentation collections are available:</p>
+  
+  <div id="docs">
+       <ul>
+           @@API_DOCS@@
+       </ul>
+  </div>
+  
+  <div id="contrib">
+       <div id="contribTitle">Contributions</div>
+       <div id="content">
+               <div id="some-contrib">
+                       @@CONTRIBS@@
+               </div>
+               <div id="no-contrib">
+                       <p><b>You have not made any contributions yet.</b></p>
+                       <p>The documentation of the libraries is not complete, and your contributions would be greatly appreciated. The procedure is easy: browse to the part of the documentation you want to contribute to and click the <font color="blue">[Edit]</font> link to start writing documentation.</p>
+                       <p>When you are happy with your changes, use the Contributing &#8594; Upload Contribution menu to send your contributions to our server.</p>
+               </div>
+       </div>
+  </div>
+</div>
diff --git a/mcs/tools/monkeydoc/Resources/images/bc_bg.png b/mcs/tools/monkeydoc/Resources/images/bc_bg.png
new file mode 100644 (file)
index 0000000..6f7bca7
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/bc_bg.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/bc_separator.png b/mcs/tools/monkeydoc/Resources/images/bc_separator.png
new file mode 100644 (file)
index 0000000..c137258
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/bc_separator.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/error.png b/mcs/tools/monkeydoc/Resources/images/error.png
new file mode 100644 (file)
index 0000000..628cf2d
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/error.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/hatch.png b/mcs/tools/monkeydoc/Resources/images/hatch.png
new file mode 100644 (file)
index 0000000..33bf2c2
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/hatch.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/headerbg.png b/mcs/tools/monkeydoc/Resources/images/headerbg.png
new file mode 100644 (file)
index 0000000..15575da
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/headerbg.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/help.png b/mcs/tools/monkeydoc/Resources/images/help.png
new file mode 100644 (file)
index 0000000..5c87017
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/help.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/house.png b/mcs/tools/monkeydoc/Resources/images/house.png
new file mode 100644 (file)
index 0000000..fed6221
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/house.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/members.png b/mcs/tools/monkeydoc/Resources/images/members.png
new file mode 100644 (file)
index 0000000..4a8672b
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/members.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/namespace.png b/mcs/tools/monkeydoc/Resources/images/namespace.png
new file mode 100644 (file)
index 0000000..2bc1624
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/namespace.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/privclass.png b/mcs/tools/monkeydoc/Resources/images/privclass.png
new file mode 100644 (file)
index 0000000..bb0c871
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/privclass.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/privdelegate.png b/mcs/tools/monkeydoc/Resources/images/privdelegate.png
new file mode 100644 (file)
index 0000000..a5b470e
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/privdelegate.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/privenumeration.png b/mcs/tools/monkeydoc/Resources/images/privenumeration.png
new file mode 100644 (file)
index 0000000..df2c3c9
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/privenumeration.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/privevent.png b/mcs/tools/monkeydoc/Resources/images/privevent.png
new file mode 100644 (file)
index 0000000..e1d3887
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/privevent.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/privextension.png b/mcs/tools/monkeydoc/Resources/images/privextension.png
new file mode 100644 (file)
index 0000000..d336ddd
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/privextension.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/privfield.png b/mcs/tools/monkeydoc/Resources/images/privfield.png
new file mode 100644 (file)
index 0000000..0b246cf
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/privfield.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/privinterface.png b/mcs/tools/monkeydoc/Resources/images/privinterface.png
new file mode 100644 (file)
index 0000000..cde4b50
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/privinterface.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/privmethod.png b/mcs/tools/monkeydoc/Resources/images/privmethod.png
new file mode 100644 (file)
index 0000000..d698426
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/privmethod.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/privproperty.png b/mcs/tools/monkeydoc/Resources/images/privproperty.png
new file mode 100644 (file)
index 0000000..41a008d
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/privproperty.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/privstructure.png b/mcs/tools/monkeydoc/Resources/images/privstructure.png
new file mode 100644 (file)
index 0000000..ff064e6
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/privstructure.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/protclass.png b/mcs/tools/monkeydoc/Resources/images/protclass.png
new file mode 100644 (file)
index 0000000..0c32ce0
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/protclass.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/protdelegate.png b/mcs/tools/monkeydoc/Resources/images/protdelegate.png
new file mode 100644 (file)
index 0000000..ca44396
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/protdelegate.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/protenumeration.png b/mcs/tools/monkeydoc/Resources/images/protenumeration.png
new file mode 100644 (file)
index 0000000..14a4cf3
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/protenumeration.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/protevent.png b/mcs/tools/monkeydoc/Resources/images/protevent.png
new file mode 100644 (file)
index 0000000..613e88e
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/protevent.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/protextension.png b/mcs/tools/monkeydoc/Resources/images/protextension.png
new file mode 100644 (file)
index 0000000..f350d55
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/protextension.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/protfield.png b/mcs/tools/monkeydoc/Resources/images/protfield.png
new file mode 100644 (file)
index 0000000..6e08553
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/protfield.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/protinterface.png b/mcs/tools/monkeydoc/Resources/images/protinterface.png
new file mode 100644 (file)
index 0000000..4579a76
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/protinterface.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/protmethod.png b/mcs/tools/monkeydoc/Resources/images/protmethod.png
new file mode 100644 (file)
index 0000000..4ecb6ff
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/protmethod.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/protproperty.png b/mcs/tools/monkeydoc/Resources/images/protproperty.png
new file mode 100644 (file)
index 0000000..f79838a
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/protproperty.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/protstructure.png b/mcs/tools/monkeydoc/Resources/images/protstructure.png
new file mode 100644 (file)
index 0000000..9b806cc
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/protstructure.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/pubclass.png b/mcs/tools/monkeydoc/Resources/images/pubclass.png
new file mode 100644 (file)
index 0000000..7531558
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/pubclass.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/pubdelegate.png b/mcs/tools/monkeydoc/Resources/images/pubdelegate.png
new file mode 100644 (file)
index 0000000..19368a6
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/pubdelegate.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/pubenumeration.png b/mcs/tools/monkeydoc/Resources/images/pubenumeration.png
new file mode 100644 (file)
index 0000000..9adab41
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/pubenumeration.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/pubevent.png b/mcs/tools/monkeydoc/Resources/images/pubevent.png
new file mode 100644 (file)
index 0000000..7abef63
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/pubevent.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/pubextension.png b/mcs/tools/monkeydoc/Resources/images/pubextension.png
new file mode 100644 (file)
index 0000000..0725306
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/pubextension.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/pubfield.png b/mcs/tools/monkeydoc/Resources/images/pubfield.png
new file mode 100644 (file)
index 0000000..c2fc5a2
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/pubfield.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/pubinterface.png b/mcs/tools/monkeydoc/Resources/images/pubinterface.png
new file mode 100644 (file)
index 0000000..050ea99
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/pubinterface.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/pubmethod.png b/mcs/tools/monkeydoc/Resources/images/pubmethod.png
new file mode 100644 (file)
index 0000000..50ad06d
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/pubmethod.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/pubproperty.png b/mcs/tools/monkeydoc/Resources/images/pubproperty.png
new file mode 100644 (file)
index 0000000..2f0ef15
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/pubproperty.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/pubstructure.png b/mcs/tools/monkeydoc/Resources/images/pubstructure.png
new file mode 100644 (file)
index 0000000..161f2fc
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/pubstructure.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/reference.png b/mcs/tools/monkeydoc/Resources/images/reference.png
new file mode 100644 (file)
index 0000000..9720bf8
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/reference.png differ
diff --git a/mcs/tools/monkeydoc/Resources/images/treebg.png b/mcs/tools/monkeydoc/Resources/images/treebg.png
new file mode 100644 (file)
index 0000000..a5588a9
Binary files /dev/null and b/mcs/tools/monkeydoc/Resources/images/treebg.png differ
diff --git a/mcs/tools/monkeydoc/Resources/mdoc-html-format.xsl b/mcs/tools/monkeydoc/Resources/mdoc-html-format.xsl
new file mode 100644 (file)
index 0000000..10acd9c
--- /dev/null
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+
+<!--
+       mdoc-html-format.xsl: HTML pass-through formatting support
+
+       Author: Jonathan Pryor (jpryor@novell.com)
+
+-->
+
+<xsl:stylesheet
+       version="1.0"
+       xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+       >
+
+       <!-- pass-through any other elements unchanged - they may be HTML -->
+       <xsl:template match="//format[@type='text/html']//*">
+               <xsl:copy>
+                       <xsl:copy-of select="@*" />
+                       <xsl:apply-templates select="*|node()" />
+               </xsl:copy>
+       </xsl:template>
+
+</xsl:stylesheet>
+
diff --git a/mcs/tools/monkeydoc/Resources/mdoc-html-utils.xsl b/mcs/tools/monkeydoc/Resources/mdoc-html-utils.xsl
new file mode 100644 (file)
index 0000000..4403d09
--- /dev/null
@@ -0,0 +1,2771 @@
+<?xml version="1.0"?>
+
+<!--
+       mdoc-html-utils.xsl: ECMA-style docs to HTML stylesheet transformation utils
+
+       Author: Joshua Tauberer (tauberer@for.net)
+       Author: Jonathan Pryor (jpryor@novell.com)
+
+       This file requires that including files define the following callable
+       templates:
+               - CreateCodeBlock (language, content)
+               - CreateEnumerationTable (content)
+               - CreateHeader (content)
+               - CreateListTable (header, content)
+               - CreateMembersTable (content)
+               - CreateSignature (content)
+               - CreateTypeDocumentationTable (content)
+               - GetLinkTarget (type, cref)
+               - CreateEditLink (e)
+
+-->
+
+<xsl:stylesheet
+       version="1.0"
+       xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+       xmlns:msxsl="urn:schemas-microsoft-com:xslt"
+       exclude-result-prefixes="msxsl"
+       >
+       <xsl:import href="mdoc-html-format.xsl" />
+       
+       <!-- TEMPLATE PARAMETERS -->
+       <xsl:param name="language" select="'C#'"/>
+       <xsl:param name="index" />
+       <xsl:param name="source-id"/>
+       
+       <xsl:variable name="ThisType" select="/Type"/>
+
+       <!-- The namespace that the current type belongs to. -->
+       <xsl:variable name="TypeNamespace" select="substring(/Type/@FullName, 1, string-length(/Type/@FullName) - string-length(/Type/@Name) - 1)"/>            
+
+       <!-- THE MAIN RENDERING TEMPLATE -->
+
+       <!-- TYPE OVERVIEW -->
+               
+       <xsl:template name="CreateTypeOverview">
+               <xsl:param name="implemented" />
+               <xsl:param name="show-members-link" />
+
+               <xsl:attribute name="id">
+                       <xsl:text>T:</xsl:text>
+                       <xsl:call-template name="GetEscapedTypeName">
+                               <xsl:with-param name="typename" select="@FullName" />
+                       </xsl:call-template>
+                       <xsl:text>:Summary</xsl:text>
+               </xsl:attribute>
+               <!-- summary -->
+               <xsl:apply-templates select="Docs/summary" mode="notoppara"/>
+               <xsl:apply-templates select="Docs/summary" mode="editlink"/>
+
+               <xsl:if test="$implemented">
+                       <p><b>Mono Implementation Note: </b></p>
+                       <blockquote>
+                               <xsl:value-of disable-output-escaping="yes" select="$implemented"/>
+                       </blockquote>
+               </xsl:if>
+
+               <xsl:if test="$show-members-link and not(Base/BaseTypeName='System.Enum' or Base/BaseTypeName='System.Delegate' or Base/BaseTypeName='System.MulticastDelegate') and count(Members)">
+                       <p>
+                               See Also:
+                               <a>
+                                       <xsl:attribute name="href">
+                                               <xsl:text>T</xsl:text>
+                                               <xsl:call-template name="GetLinkId">
+                                                       <xsl:with-param name="type" select="." />
+                                                       <xsl:with-param name="member" select="." />
+                                               </xsl:call-template>
+                                               <xsl:text>/*</xsl:text>
+                                       </xsl:attribute>
+                                       <xsl:value-of select="translate(@Name, '+', '.')"/>
+                                       <xsl:value-of select="' '" />
+                                       <xsl:text>Members</xsl:text>
+                               </a>
+                       </p>
+               </xsl:if>
+               
+               <!--
+               Inheritance tree, but only for non-standard classes and not for interfaces
+               -->
+               <xsl:if test="not(Base/BaseTypeName='System.Enum' or Base/BaseTypeName='System.Delegate' or Base/BaseTypeName='System.ValueType' or Base/BaseTypeName='System.Object' or Base/BaseTypeName='System.MulticastDelegate' or count(Base/ParentType)=0)">
+                       <p>
+                       <xsl:for-each select="Base/ParentType">
+                               <xsl:sort select="@Order" order="descending"/>
+                               <xsl:variable name="p" select="position()" />
+                               <xsl:for-each select="parent::Base/ParentType[position() &lt; $p]">
+                                       <xsl:value-of select="'&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;'" disable-output-escaping="yes"/>
+                               </xsl:for-each>
+                               <a>
+                                       <xsl:attribute name="href">
+                                               <xsl:call-template name="GetLinkTargetHtml">
+                                                       <xsl:with-param name="type" select="@Type" />
+                                                       <xsl:with-param name="cref">
+                                                               <xsl:text>T:</xsl:text>
+                                                               <xsl:call-template name="GetEscapedTypeName">
+                                                                       <xsl:with-param name="typename" select="@Type" />
+                                                               </xsl:call-template>
+                                                       </xsl:with-param>
+                                               </xsl:call-template>
+                                       </xsl:attribute>
+                                       <xsl:value-of select="@Type"/>
+                               </a>
+                               <br/>
+                       </xsl:for-each>
+
+                       <xsl:for-each select="Base/ParentType">
+                               <xsl:value-of select="'&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;'" disable-output-escaping="yes"/>
+                       </xsl:for-each>
+                       <xsl:value-of select="@FullName"/>
+                       </p>
+               </xsl:if>
+               <!--
+               <xsl:if test="Base/BaseTypeName='System.Enum'">
+                       <br/>
+                       The type of the values in this enumeration is 
+                       <xsl:apply-templates select="Members/Member[@MemberName='value__']/ReturnValue/ReturnType" mode="typelink"><xsl:with-param name="wrt" select="$TypeNamespace"/></xsl:apply-templates>.
+               </xsl:if>
+               -->
+       </xsl:template>
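+
+       <!--
+       Illustrative sketch (type names hypothetical): for a type whose Base lists
+       ParentType entries for System.Object and System.MarshalByRefObject, the loop
+       above emits one linked row per ancestor, each indented five non-breaking
+       spaces deeper than the last, with the current type's @FullName as the
+       final, unlinked row.
+       -->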
+
+       <xsl:template name="CreateTypeSignature">
+                       <xsl:attribute name="id">
+                               <xsl:text>T:</xsl:text>
+                               <xsl:call-template name="GetEscapedTypeName">
+                                       <xsl:with-param name="typename" select="@FullName" />
+                               </xsl:call-template>
+                               <xsl:text>:Signature</xsl:text>
+                       </xsl:attribute>
+                       <!-- signature -->
+                       <xsl:call-template name="CreateSignature">
+                               <xsl:with-param name="content">
+                                       <xsl:choose>
+                                       <xsl:when test="$language='C#'">
+
+                                               <xsl:for-each select="Attributes/Attribute">
+                                                       <xsl:text>[</xsl:text>
+                                                       <xsl:value-of select="AttributeName"/>
+                                                       <xsl:text>]</xsl:text>
+                                                       <br/>
+                                               </xsl:for-each>
+
+                                               <xsl:for-each select="ReturnValue/Attributes/Attribute">
+                                                       <xsl:text>[return:</xsl:text>
+                                                       <xsl:value-of select="AttributeName"/>
+                                                       <xsl:text>]</xsl:text>
+                                                       <br/>
+                                               </xsl:for-each> 
+       
+                                               <xsl:choose>
+
+                                               <xsl:when test="Base/BaseTypeName='System.Enum'">
+                                                       <xsl:call-template name="getmodifiers">
+                                                               <xsl:with-param name="sig" select="TypeSignature[@Language='C#']/@Value"/>
+                                                       </xsl:call-template>
+
+                                                       <xsl:text>enum </xsl:text>
+       
+                                                       <!-- member name, argument list -->
+                                                       <b>
+                                                       <xsl:value-of select="translate (@Name, '+', '.')"/>
+                                                       </b>
+                                               </xsl:when>
+       
+                                               <xsl:when test="Base/BaseTypeName='System.Delegate' or Base/BaseTypeName='System.MulticastDelegate'">
+                                                       <xsl:choose>
+
+                                                       <xsl:when test="count(Parameters) &gt; 0 and count(ReturnValue) &gt; 0">
+                                                       <!-- Only recreate the delegate signature if the appropriate information
+                                                               is present in the XML file. -->
+
+                                                       <xsl:call-template name="getmodifiers">
+                                                               <xsl:with-param name="sig" select="TypeSignature[@Language='C#']/@Value"/>
+                                                       </xsl:call-template>
+
+                                                       <xsl:text>delegate </xsl:text>
+       
+                                                       <xsl:apply-templates select="ReturnValue/ReturnType" mode="typelink"><xsl:with-param name="wrt" select="$TypeNamespace"/></xsl:apply-templates>
+       
+                                                       <!-- hard space -->
+                                                       <xsl:value-of select="' '"/>
+       
+                                                       <!-- member name, argument list -->
+                                                       <b>
+                                                               <xsl:call-template name="GetDefinitionName">
+                                                                       <xsl:with-param name="name" select="translate (@Name, '+', '.')" />
+                                                                       <xsl:with-param name="TypeParameters" select="TypeParameters" />
+                                                               </xsl:call-template>
+                                                       </b>
+
+                                                       <!-- hard space -->
+                                                       <xsl:value-of select="' '"/>
+
+                                                       <xsl:value-of select="'('"/> <!-- prevents whitespace issues -->
+                                                       
+                                                       <xsl:for-each select="Parameters/Parameter">
+                                                               <xsl:call-template name="ShowParameter">
+                                                                       <xsl:with-param name="Param" select="."/>
+                                                                       <xsl:with-param name="TypeNamespace" select="$TypeNamespace"/>
+                                                               </xsl:call-template>
+
+                                                               <xsl:if test="not(position()=last())">, </xsl:if>
+                                                       </xsl:for-each>
+                                                       
+                                                       <xsl:value-of select="')'"/>
+
+                                                       </xsl:when>
+                                                       
+                                                       <xsl:otherwise>
+                                                               <xsl:apply-templates select="TypeSignature[@Language=$language]/@Value"/>       
+                                                       </xsl:otherwise>
+
+                                                       </xsl:choose>
+
+                                                       
+                                               </xsl:when>
+
+                                               <xsl:otherwise>
+                                                       <xsl:call-template name="getmodifiers">
+                                                               <xsl:with-param name="sig" select="TypeSignature[@Language='C#']/@Value"/>
+                                                               <xsl:with-param name="typetype" select="true()"/>
+                                                       </xsl:call-template>
+               
+                                                       <xsl:value-of select="' '"/>
+               
+                                                       <b>
+                                                               <xsl:call-template name="GetDefinitionName">
+                                                                       <xsl:with-param name="name" select="translate (@Name, '+', '.')" />
+                                                                       <xsl:with-param name="TypeParameters" select="TypeParameters" />
+                                                               </xsl:call-template>
+                                                       </b>
+               
+                                                       <xsl:variable name="HasStandardBaseType" select="Base/BaseTypeName='System.Object' or Base/BaseTypeName='System.ValueType'"/>
+                                                       <xsl:variable name="HasBaseType" select="count(Base/BaseTypeName)>0"/>
+                                                       <xsl:if test="(($HasBaseType) and not($HasStandardBaseType)) or not(count(Interfaces/Interface)=0)">
+                                                               <xsl:text> : </xsl:text>
+               
+                                                               <xsl:if test="$HasBaseType and not($HasStandardBaseType)">
+                                                                       <xsl:apply-templates select="Base/BaseTypeName" mode="typelink"><xsl:with-param name="wrt" select="$TypeNamespace"/></xsl:apply-templates>
+                                                                       <xsl:if test="not(count(Interfaces/Interface)=0)">,     </xsl:if>
+                                                               </xsl:if>
+               
+                                                               <xsl:for-each select="Interfaces/Interface">
+                                                                       <xsl:if test="not(position()=1)">, </xsl:if>
+                                                                       <xsl:apply-templates select="InterfaceName" mode="typelink"><xsl:with-param name="wrt" select="$TypeNamespace"/></xsl:apply-templates>
+                                                               </xsl:for-each>
+                                                       
+                                                       </xsl:if>
+                                               </xsl:otherwise>
+
+                                               </xsl:choose>
+
+                                               <xsl:call-template name="CreateGenericConstraints">
+                                                       <xsl:with-param name="TypeParameters" select="TypeParameters" />
+                                               </xsl:call-template>
+
+                                       </xsl:when>
+
+                                       <xsl:otherwise>
+                                               <xsl:apply-templates select="TypeSignature[@Language=$language]/@Value"/>
+                                       </xsl:otherwise>
+                                       
+                                       </xsl:choose>
+                               </xsl:with-param>
+                       </xsl:call-template>
+       </xsl:template>
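+
+       <!--
+       Example of the recreated C# signature (assuming Parameters and ReturnValue
+       are present in the XML): a delegate type renders roughly as
+               public delegate void EventHandler (object sender, EventArgs e)
+       an enum as "enum Name", and any other type as its modifiers, name and,
+       when non-standard, its base type and interface list after " : ".
+       -->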
+
+       <xsl:template name="GetDefinitionName">
+               <xsl:param name="name" />
+               <xsl:param name="TypeParameters" />
+
+               <xsl:choose>
+                       <!-- do NOT process explicitly implemented generic interface members
+                            unless they're actually generic methods. -->
+                       <xsl:when test="contains ($name, '&gt;') and
+                                       '&gt;' = substring ($name, string-length ($name), 1)">
+                               <xsl:value-of select="substring-before ($name, '&lt;')" />
+                               <xsl:text>&lt;</xsl:text>
+                               <xsl:for-each select="$TypeParameters/TypeParameter">
+                                       <xsl:for-each select="Attributes/Attribute">
+                                               <xsl:text>[</xsl:text>
+                                               <xsl:value-of select="AttributeName"/>
+                                               <xsl:text>] </xsl:text>
+                                       </xsl:for-each>
+                                       <xsl:choose>
+                                               <xsl:when test="@Name">
+                                                       <xsl:value-of select="@Name" />
+                                               </xsl:when>
+                                               <xsl:otherwise>
+                                                       <xsl:value-of select="." />
+                                               </xsl:otherwise>
+                                       </xsl:choose>
+                                       <xsl:if test="not(position()=last())">, </xsl:if>
+                               </xsl:for-each>
+                               <xsl:text>&gt;</xsl:text>
+                       </xsl:when>
+                       <xsl:otherwise>
+                               <xsl:value-of select="$name" />
+                       </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
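+
+       <!--
+       Example (hypothetical type): for name "Dictionary<TKey,TValue>" with two
+       TypeParameter entries, this emits "Dictionary<TKey, TValue>", prefixing
+       each parameter with any of its attributes in brackets.
+       -->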
+
+       <xsl:template name="CreateGenericConstraints">
+               <xsl:param name="TypeParameters" />
+
+               <xsl:for-each select="$TypeParameters/TypeParameter">
+                       <xsl:variable name="constraintsCount" select="count(Constraints/*)" />
+                       <xsl:if test="$constraintsCount > 0 and count(Constraints/*[.='Contravariant' or .='Covariant']) != $constraintsCount">
+                               <xsl:call-template name="CreateGenericParameterConstraints">
+                                       <xsl:with-param name="constraints" select="Constraints" />
+                               </xsl:call-template>
+                       </xsl:if>
+               </xsl:for-each>
+       </xsl:template>
+
+       <xsl:template name="CreateGenericParameterConstraints">
+               <xsl:param name="constraints" />
+
+               <br />
+               <xsl:text> where </xsl:text>
+               <xsl:value-of select="@Name" />
+               <xsl:text> : </xsl:text>
+               <xsl:variable name="kind" 
+                       select="count($constraints[ParameterAttribute='ReferenceTypeConstraint'])+
+                               count($constraints[ParameterAttribute='NotNullableValueTypeConstraint'])" />
+               <xsl:variable name="base" select="count($constraints/BaseTypeName)" />
+               <xsl:variable name="iface" select="count($constraints/InterfaceName)" />
+               <xsl:variable name="struct" select="$constraints/ParameterAttribute='NotNullableValueTypeConstraint'" />
+               <xsl:if test="$constraints/ParameterAttribute='ReferenceTypeConstraint'">
+                       <xsl:text>class</xsl:text>
+               </xsl:if>
+               <xsl:if test="$constraints/ParameterAttribute='NotNullableValueTypeConstraint'">
+                       <xsl:text>struct</xsl:text>
+               </xsl:if>
+               <xsl:if test="$constraints/BaseTypeName and not($struct)">
+                       <xsl:if test="$kind">, </xsl:if>
+                       <xsl:apply-templates select="$constraints/BaseTypeName" mode="typelink" />
+               </xsl:if>
+               <xsl:for-each select="$constraints/InterfaceName">
+                       <xsl:if test="position()=1">
+                               <xsl:if test="$kind or $base">, </xsl:if>
+                       </xsl:if>
+                       <xsl:apply-templates select="." mode="typelink" />
+                       <xsl:if test="not(position()=last())">, </xsl:if>
+               </xsl:for-each>
+               <xsl:if test="$constraints/ParameterAttribute='DefaultConstructorConstraint' and not($struct)">
+                       <xsl:if test="$base or $iface">, </xsl:if>
+                       <xsl:text>new()</xsl:text>
+               </xsl:if>
+       </xsl:template>
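+
+       <!--
+       Example (hypothetical parameter T): Constraints of ReferenceTypeConstraint,
+       an InterfaceName of System.IComparable and DefaultConstructorConstraint
+       render as " where T : class, System.IComparable, new()"; a struct
+       constraint suppresses both the base type and the new() clause, as in C#.
+       -->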
+
+       <xsl:template name="CreateMemberOverview">
+               <xsl:param name="implemented" />
+
+               <p class="Summary">
+                       <xsl:apply-templates select="Docs/summary" mode="notoppara"/>
+                       <xsl:apply-templates select="Docs/summary" mode="editlink"/>
+               </p>
+
+               <xsl:if test="$implemented">
+                       <p><b>Mono Implementation Note: </b></p>
+                       <blockquote>
+                               <xsl:value-of disable-output-escaping="yes" select="$implemented"/>
+                       </blockquote>
+               </xsl:if>
+
+               <!-- member value -->
+               <xsl:if test="MemberValue">
+               <p><b>Value: </b>
+                       <xsl:value-of select="MemberValue"/>
+               </p>
+               </xsl:if>
+
+       </xsl:template>
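+
+       <!--
+       Example (value illustrative): a constant whose XML carries
+       <MemberValue>2147483647</MemberValue> gets a "Value: 2147483647"
+       paragraph after its summary.
+       -->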
+
+       <xsl:template name="CreateRelatedSection">
+         <xsl:param name="section" />
+         <xsl:param name="type" />
+         <xsl:if test="count(Docs/related[@type=$type])">
+               <h3 class="{$type}"><xsl:value-of select="$section" /></h3>
+               <ul class="{$type}">
+                 <xsl:for-each select="Docs/related[@type=$type]">
+                       <li><a href="{@href}"><xsl:value-of select="." /></a></li>
+                 </xsl:for-each>
+               </ul>
+         </xsl:if>
+       </xsl:template>
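+
+       <!--
+       Example (hypothetical element): <related type="article"
+       href="http://example.org/intro">Introduction</related> under Docs produces,
+       for $type 'article', the $section heading plus a linked "Introduction" item.
+       -->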
+
+       <xsl:template name="CreatePlatformRequirements">
+         <!-- For now, only the MonoTouch docs carry this information, so only process those types -->
+         <xsl:if test="starts-with(/Type/@FullName, 'MonoTouch')">
+               <xsl:choose>
+                 <!-- We first check if we have a [Since] at the member level -->
+                 <xsl:when test="count(Attributes/Attribute/AttributeName[starts-with(text(), 'MonoTouch.ObjCRuntime.Since')])">
+                       <b>Minimum iOS version: </b>
+                       <xsl:value-of select="translate(substring-before (substring-after (Attributes/Attribute/AttributeName[starts-with(text(), 'MonoTouch.ObjCRuntime.Since')], 'MonoTouch.ObjCRuntime.Since('), ')'), ', ', '.')" />
+                       <br />
+                 </xsl:when>
+                 <!-- If not, we then check at the type level -->
+                 <xsl:when test="count(/Type/Attributes/Attribute/AttributeName[starts-with(text(), 'MonoTouch.ObjCRuntime.Since')])">
+                       <b>Minimum iOS version: </b> 
+                       <xsl:value-of select="translate(substring-before (substring-after (/Type/Attributes/Attribute/AttributeName[starts-with(text(), 'MonoTouch.ObjCRuntime.Since')], 'MonoTouch.ObjCRuntime.Since('), ')'), ', ', '.')" />
+                       <br />
+                 </xsl:when>
+               </xsl:choose>
+         </xsl:if>
+       </xsl:template>
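+
+       <!--
+       Example: an AttributeName of "MonoTouch.ObjCRuntime.Since(4, 0)" is reduced
+       by the substring/translate chain to "4.0" and rendered as
+       "Minimum iOS version: 4.0"; a member-level [Since] takes precedence over
+       the type-level one.
+       -->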
+
+       <xsl:template name="CreateMemberSignature">
+               <xsl:param name="linkid" select="''" />
+
+               <xsl:call-template name="CreateSignature">
+                       <xsl:with-param name="content">
+                       <xsl:if test="contains(MemberSignature[@Language='C#']/@Value,'this[')">
+                               <p><i>This is the default property for this class.</i></p>
+                       </xsl:if>
+
+                       <!-- recreate the signature -->
+               
+                       <xsl:for-each select="Attributes/Attribute[AttributeName != 'System.Runtime.CompilerServices.Extension']">
+                               <xsl:text>[</xsl:text>
+                               <xsl:value-of select="AttributeName"/>
+                               <xsl:text>]</xsl:text>
+                               <br/>
+                       </xsl:for-each> 
+
+                       <xsl:for-each select="ReturnValue/Attributes/Attribute">
+                               <xsl:text>[return:</xsl:text>
+                               <xsl:value-of select="AttributeName"/>
+                               <xsl:text>]</xsl:text>
+                               <br/>
+                       </xsl:for-each> 
+
+                       <xsl:call-template name="getmodifiers">
+                               <xsl:with-param name="sig" select="MemberSignature[@Language='C#']/@Value"/>
+                       </xsl:call-template>
+
+                       <xsl:if test="MemberType = 'Event'">
+                               <xsl:text>event </xsl:text>
+
+                               <xsl:if test="ReturnValue/ReturnType=''">
+                                       <xsl:value-of select="substring-before(substring-after(MemberSignature[@Language='C#']/@Value, 'event '), concat(' ', @MemberName))"/>
+                               </xsl:if>
+                       </xsl:if>
+
+                       <!-- return value (comes out "" where not applicable/available) -->
+                       <xsl:choose>
+                       <xsl:when test="@MemberName='op_Implicit'">
+                               <xsl:text>implicit operator</xsl:text>
+                       </xsl:when>
+                       <xsl:when test="@MemberName='op_Explicit'">
+                               <xsl:text>explicit operator</xsl:text>
+                       </xsl:when>
+                       <xsl:otherwise>
+                               <xsl:apply-templates select="ReturnValue/ReturnType" mode="typelink">
+                                       <xsl:with-param name="wrt" select="$TypeNamespace"/>
+                               </xsl:apply-templates>
+                       </xsl:otherwise>                                        
+                       </xsl:choose>
+
+                       <!-- hard space -->
+                       <xsl:value-of select="' '"/>
+
+                       <!-- member name -->
+                       <xsl:choose>
+                       
+                       <!-- Constructors get the name of the class -->
+                       <xsl:when test="MemberType='Constructor'">
+                               <b>
+                                       <xsl:call-template name="GetConstructorName">
+                                               <xsl:with-param name="type" select="../.." />
+                                               <xsl:with-param name="ctor" select="." />
+                                       </xsl:call-template>
+                               </b>
+                       </xsl:when>
+                       
+                       <!-- Conversion operators get the return type -->
+                       <xsl:when test="@MemberName='op_Implicit' or @MemberName='op_Explicit'">
+                               <xsl:apply-templates select="ReturnValue/ReturnType" mode="typelink">
+                                       <xsl:with-param name="wrt" select="$TypeNamespace"/>
+                               </xsl:apply-templates>
+                       </xsl:when>
+                       
+                       <!-- Regular operators get their symbol -->
+                       <xsl:when test="@MemberName='op_UnaryPlus'">operator+</xsl:when>
+                       <xsl:when test="@MemberName='op_UnaryNegation'">operator-</xsl:when>
+                       <xsl:when test="@MemberName='op_LogicalNot'">operator!</xsl:when>
+                       <xsl:when test="@MemberName='op_OnesComplement'">operator~</xsl:when>
+                       <xsl:when test="@MemberName='op_Increment'">operator++</xsl:when>
+                       <xsl:when test="@MemberName='op_Decrement'">operator--</xsl:when>
+                       <xsl:when test="@MemberName='op_True'">operator true</xsl:when>
+                       <xsl:when test="@MemberName='op_False'">operator false</xsl:when>
+                       <xsl:when test="@MemberName='op_Addition'">operator+</xsl:when>
+                       <xsl:when test="@MemberName='op_Subtraction'">operator-</xsl:when>
+                       <xsl:when test="@MemberName='op_Multiply'">operator*</xsl:when>
+                       <xsl:when test="@MemberName='op_Division'">operator/</xsl:when>
+                       <xsl:when test="@MemberName='op_Modulus'">operator%</xsl:when>
+                       <xsl:when test="@MemberName='op_BitwiseAnd'">operator&amp;</xsl:when>
+                       <xsl:when test="@MemberName='op_BitwiseOr'">operator|</xsl:when>
+                       <xsl:when test="@MemberName='op_ExclusiveOr'">operator^</xsl:when>
+                       <xsl:when test="@MemberName='op_LeftShift'">operator&lt;&lt;</xsl:when>
+                       <xsl:when test="@MemberName='op_RightShift'">operator&gt;&gt;</xsl:when>
+                       <xsl:when test="@MemberName='op_Equality'">operator==</xsl:when>
+                       <xsl:when test="@MemberName='op_Inequality'">operator!=</xsl:when>
+                       <xsl:when test="@MemberName='op_GreaterThan'">operator&gt;</xsl:when>
+                       <xsl:when test="@MemberName='op_LessThan'">operator&lt;</xsl:when>
+                       <xsl:when test="@MemberName='op_GreaterThanOrEqual'">operator&gt;=</xsl:when>
+                       <xsl:when test="@MemberName='op_LessThanOrEqual'">operator&lt;=</xsl:when>
+
+                       <xsl:when test="MemberType='Property' and count(Parameters/Parameter) &gt; 0">
+                               <!-- C# only permits indexer properties to have arguments -->
+                               <xsl:text>this</xsl:text>
+                       </xsl:when>
+                       
+                       <!-- Everything else just gets its name -->
+                       <xsl:when test="contains (@MemberName, '&lt;')">
+                               <b>
+                                       <xsl:call-template name="GetDefinitionName">
+                                               <xsl:with-param name="name" select="@MemberName" />
+                                               <xsl:with-param name="TypeParameters" select="TypeParameters" />
+                                       </xsl:call-template>
+                               </b>
+                       </xsl:when>
+
+                       <xsl:otherwise>
+                               <b><xsl:value-of select="@MemberName"/></b>
+                       </xsl:otherwise>
+                       </xsl:choose>
+
+                       <!-- hard space -->
+                       <xsl:value-of select="' '"/>
+
+                       <!-- argument list -->
+                       <xsl:if test="MemberType='Method' or MemberType='Constructor' or (MemberType='Property' and count(Parameters/Parameter))">
+                               <xsl:if test="not(MemberType='Property')">(</xsl:if>
+                               <xsl:if test="MemberType='Property'">[</xsl:if>
+
+                               <xsl:for-each select="Parameters/Parameter">
+                                       <xsl:call-template name="ShowParameter">
+                                               <xsl:with-param name="Param" select="."/>
+                                               <xsl:with-param name="TypeNamespace" select="$TypeNamespace"/>
+                                       </xsl:call-template>
+
+                                       <xsl:if test="not(position()=last())">, </xsl:if>
+                               </xsl:for-each>
+                               <xsl:if test="not(MemberType='Property')">)</xsl:if>
+                               <xsl:if test="MemberType='Property'">]</xsl:if>
+                       </xsl:if>
+
+                       <xsl:if test="MemberType='Property'">
+                               <xsl:value-of select="' '"/>
+                               <xsl:text>{</xsl:text>
+                               <xsl:value-of select="substring-before(substring-after(MemberSignature[@Language='C#']/@Value, '{'), '}')"/>
+                               <xsl:text>}</xsl:text>
+                       </xsl:if>
+                       <xsl:call-template name="CreateGenericConstraints">
+                               <xsl:with-param name="TypeParameters" select="TypeParameters" />
+                       </xsl:call-template>
+                       </xsl:with-param>
+               </xsl:call-template>
+               
+       </xsl:template>
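+
+       <!--
+       Examples of the member-name rules above: op_Addition renders as "operator+",
+       op_Implicit as "implicit operator" followed by the return type, an indexer
+       property as "this[...]", and a constructor as its class name.
+       -->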
+
+       <xsl:template name="GetConstructorName">
+               <xsl:param name="type" />
+               <xsl:param name="ctor" />
+
+               <xsl:choose>
+                       <xsl:when test="contains($type/@Name, '&lt;')">
+                               <xsl:value-of select="translate (substring-before ($type/@Name, '&lt;'), '+', '.')" />
+                       </xsl:when>
+                       <xsl:otherwise>
+                               <xsl:value-of select="translate ($type/@Name, '+', '.')" />
+                       </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
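+
+       <!--
+       Example (hypothetical type): @Name "Outer+Inner<T>" yields the constructor
+       name "Outer.Inner"; the '+' becomes '.' and the type-parameter list is
+       dropped.
+       -->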
+
+       <xsl:template name="ShowParameter">
+               <xsl:param name="Param"/>
+               <xsl:param name="TypeNamespace"/>
+               <xsl:param name="prototype" select="false()"/>
+
+               <xsl:if test="not($prototype)">
+                       <xsl:for-each select="$Param/Attributes/Attribute[not(Exclude='1') and not(AttributeName='ParamArrayAttribute' or AttributeName='System.ParamArray')]">
+                               <xsl:text>[</xsl:text>
+                               <xsl:value-of select="AttributeName"/>
+                               <xsl:text>]</xsl:text>
+                               <xsl:value-of select="' '"/>
+                       </xsl:for-each>
+               </xsl:if>
+
+               <xsl:if test="count($Param/Attributes/Attribute/AttributeName[.='ParamArrayAttribute' or .='System.ParamArray'])">
+                       <b>params</b>
+                       <xsl:value-of select="' '"/>
+               </xsl:if>
+
+               <xsl:if test="$Param/@RefType">
+                       <i><xsl:value-of select="$Param/@RefType"/></i>
+                       <!-- hard space -->
+                       <xsl:value-of select="' '"/>
+               </xsl:if>
+
+               <!-- parameter type link -->
+               <xsl:apply-templates select="$Param/@Type" mode="typelink">
+                       <xsl:with-param name="wrt" select="$TypeNamespace"/>
+               </xsl:apply-templates>
+
+               <xsl:if test="not($prototype)">
+                       <!-- hard space -->
+                       <xsl:value-of select="' '"/>
+       
+                       <!-- parameter name -->
+                       <xsl:value-of select="$Param/@Name"/>
+               </xsl:if>
+       </xsl:template>
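+
+       <!--
+       Example (hypothetical parameter): RefType "out", Type "System.String" and
+       Name "result" render as "out string result", with "out" italicized and the
+       type hyperlinked; a ParamArray parameter gains a bold "params" prefix.
+       -->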
+
+       <xsl:template name="DisplayDocsInformation">
+               <xsl:param name="linkid" />
+
+               <!-- The namespace that the current type belongs to. -->
+               <xsl:variable name="TypeNamespace" select="substring(@FullName, 1, string-length(@FullName) - string-length(@Name) - 1)"/>
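+               <!-- e.g. @FullName "System.Collections.ArrayList" minus @Name "ArrayList" and the separating dot yields "System.Collections" -->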
+
+               <!-- altmember: cref cross-references to other members, rendered below as a "See Also" section -->
+
+               <xsl:if test="count(Docs/altmember)">
+                       <xsl:call-template name="CreateH4Section">
+                               <xsl:with-param name="name" select="'See Also'"/>
+                               <xsl:with-param name="child-id" select="concat ($linkid, ':See Also')" />
+                               <xsl:with-param name="content">
+                                       <xsl:for-each select="Docs/altmember">
+                                               <div><xsl:apply-templates select="@cref" mode="cref"/></div>
+                                       </xsl:for-each>
+                               </xsl:with-param>
+                       </xsl:call-template>
+               </xsl:if>
+
+               <!-- parameters & return & value -->
+
+               <xsl:if test="count(Docs/typeparam)">
+                       <xsl:call-template name="CreateH4Section">
+                               <xsl:with-param name="name" select="'Type Parameters'"/>
+                               <xsl:with-param name="child-id" select="concat ($linkid, ':Type Parameters')" />
+                               <xsl:with-param name="content">
+                                       <dl>
+                                       <xsl:for-each select="Docs/typeparam">
+                                               <dt><i><xsl:value-of select="@name"/></i></dt>
+                                               <dd>
+                                                       <xsl:apply-templates select="." mode="notoppara"/>
+                                                       <xsl:apply-templates select="." mode="editlink"/>
+                                               </dd>
+                                       </xsl:for-each>
+                                       </dl>
+                               </xsl:with-param>
+                       </xsl:call-template>
+               </xsl:if>
+               <xsl:if test="count(Docs/param)">
+                       <xsl:call-template name="CreateH4Section">
+                               <xsl:with-param name="name" select="'Parameters'"/>
+                               <xsl:with-param name="child-id" select="concat ($linkid, ':Parameters')" />
+                               <xsl:with-param name="content">
+                                       <dl>
+                                       <xsl:for-each select="Docs/param">
+                                               <dt><i><xsl:value-of select="@name"/></i></dt>
+                                               <dd>
+                                                       <xsl:apply-templates select="." mode="notoppara"/>
+                                                       <xsl:apply-templates select="." mode="editlink"/>
+                                               </dd>
+                                       </xsl:for-each>
+                                       </dl>
+                               </xsl:with-param>
+                       </xsl:call-template>
+               </xsl:if>
+               <xsl:if test="count(Docs/returns)">
+                       <xsl:call-template name="CreateH4Section">
+                               <xsl:with-param name="name" select="'Returns'"/>
+                               <xsl:with-param name="child-id" select="concat ($linkid, ':Returns')" />
+                               <xsl:with-param name="content">
+                                       <xsl:apply-templates select="Docs/returns" mode="notoppara"/>
+                                       <xsl:apply-templates select="Docs/returns" mode="editlink"/>
+                               </xsl:with-param>
+                       </xsl:call-template>
+               </xsl:if>
+               <xsl:if test="count(Docs/value)">
+                       <xsl:call-template name="CreateH4Section">
+                               <xsl:with-param name="name" select="'Value'"/>
+                               <xsl:with-param name="child-id" select="concat ($linkid, ':Value')" />
+                               <xsl:with-param name="content">
+                                       <xsl:apply-templates select="Docs/value" mode="notoppara"/>
+                                       <xsl:apply-templates select="Docs/value" mode="editlink"/>
+                               </xsl:with-param>
+                       </xsl:call-template>
+               </xsl:if>
+
+               <!-- method/property/constructor exceptions -->
+
+               <xsl:if test="count(Docs/exception)">
+                       <xsl:call-template name="CreateH4Section">
+                               <xsl:with-param name="name" select="'Exceptions'"/>
+                               <xsl:with-param name="child-id" select="concat ($linkid, ':Exceptions')" />
+                               <xsl:with-param name="content">
+                                       <xsl:call-template name="CreateTypeDocumentationTable">
+                                       <xsl:with-param name="content">
+                                       <xsl:for-each select="Docs/exception">
+                                               <tr valign="top">
+                                               <td>
+                                                       <xsl:apply-templates select="@cref" mode="typelink">
+                                                               <xsl:with-param name="wrt" select="$TypeNamespace"/>
+                                                       </xsl:apply-templates>
+                                               </td>
+                                               <td>
+                                                       <xsl:apply-templates select="." mode="notoppara"/>
+                                                       <xsl:apply-templates select="." mode="editlink"/>
+                                               </td>
+                                               </tr>
+                                       </xsl:for-each>
+                                       </xsl:with-param>
+                                       </xsl:call-template>
+                               </xsl:with-param>
+                       </xsl:call-template>
+               </xsl:if>
+
+               <!-- remarks -->
+
+               <xsl:if test="count(Docs/remarks)">
+                       <xsl:call-template name="CreateH2Section">
+                               <xsl:with-param name="name" select="'Remarks'"/>
+                               <xsl:with-param name="child-id" select="concat ($linkid, ':Remarks')" />
+                               <xsl:with-param name="content">
+                                       <xsl:apply-templates select="Docs/remarks" mode="notoppara"/>
+                                       <xsl:apply-templates select="Docs/remarks" mode="editlink"/>
+                               </xsl:with-param>
+                       </xsl:call-template>
+               </xsl:if>
+
+               <!-- thread safety -->
+
+               <xsl:if test="count(ThreadingSafetyStatement)">
+                       <xsl:call-template name="CreateH2Section">
+                               <xsl:with-param name="name" select="'Thread Safety'"/>
+                               <xsl:with-param name="child-id" select="concat ($linkid, ':Thread Safety')" />
+                               <xsl:with-param name="content">
+                                       <xsl:apply-templates select="ThreadingSafetyStatement" mode="notoppara"/>
+                               </xsl:with-param>
+                       </xsl:call-template>
+               </xsl:if>
+
+
+               <!-- permissions -->
+
+               <xsl:if test="count(Docs/permission)">
+                       <xsl:call-template name="CreateH2Section">
+                               <xsl:with-param name="name" select="'Permissions'"/>
+                               <xsl:with-param name="child-id" select="concat ($linkid, ':Permissions')" />
+                               <xsl:with-param name="content">
+                                       <xsl:call-template name="CreateTypeDocumentationTable">
+                                       <xsl:with-param name="content">
+                                       <xsl:for-each select="Docs/permission">
+                                               <tr valign="top">
+                                               <td>
+                                                       <xsl:apply-templates select="@cref" mode="typelink">
+                                                               <xsl:with-param name="wrt" select="$TypeNamespace"/>
+                                                       </xsl:apply-templates>
+                                                       <xsl:apply-templates select="." mode="editlink"/>
+                                               </td>
+                                               <td>
+                                                       <xsl:apply-templates select="." mode="notoppara"/>
+                                               </td>
+                                               </tr>
+                                       </xsl:for-each>
+                                       </xsl:with-param>
+                                       </xsl:call-template>
+                               </xsl:with-param>
+                       </xsl:call-template>
+               </xsl:if>
+
+               <!-- enumeration values -->
+
+               <xsl:if test="Base/BaseTypeName = 'System.Enum'">
+                       <xsl:call-template name="CreateH2Section">
+                               <xsl:with-param name="name" select="'Members'"/>
+                               <xsl:with-param name="child-id" select="concat ($linkid, ':Members')" />
+                               <xsl:with-param name="content">
+                                       <xsl:call-template name="CreateEnumerationTable">
+                                       <xsl:with-param name="content">
+
+                                               <xsl:for-each select="Members/Member[MemberType='Field']">
+                                                       <xsl:if test="not(@MemberName='value__')">
+                                                               <tr valign="top"><td>
+                                                                       <xsl:attribute name="id">
+                                                                               <xsl:text>F:</xsl:text>
+                                                                               <xsl:value-of select="translate (/Type/@FullName, '+', '.')" />
+                                                                               <xsl:text>.</xsl:text>
+                                                                               <xsl:value-of select="@MemberName" />
+                                                                       </xsl:attribute>
+                                                                       <b>
+                                                                               <xsl:value-of select="@MemberName"/>
+                                                                       </b>
+                                                               </td>
+                                                               <td>
+                                                                       <xsl:apply-templates select="Docs/summary" mode="notoppara"/>
+                                                                       <xsl:apply-templates select="Docs/summary" mode="editlink"/>
+                                                               </td>
+                                                               </tr>
+                                                       </xsl:if>
+                                               </xsl:for-each>
+                                       </xsl:with-param>
+                                       </xsl:call-template>
+                               </xsl:with-param>
+                       </xsl:call-template>
+               </xsl:if>
+
+               <!-- examples -->
+
+               <xsl:if test="count(Docs/example)">
+                       <xsl:for-each select="Docs/example">
+                               <xsl:call-template name="CreateH2Section">
+                                       <xsl:with-param name="name" select="'Example'"/>
+                                       <xsl:with-param name="child-id" select="concat ($linkid, ':Example:', position())" />
+                                       <xsl:with-param name="content">
+                                               <xsl:apply-templates select="." mode="notoppara"/>
+                                       </xsl:with-param>
+                               </xsl:call-template>
+                       </xsl:for-each>
+               </xsl:if>
+
+               <!-- related content -->
+               <xsl:if test="count(Docs/related)">
+                 <xsl:call-template name="CreateH2Section">
+                       <xsl:with-param name="name" select="'Related content'" />
+                       <xsl:with-param name="child-id" select="concat ($linkid, ':Related:')" />
+                       <xsl:with-param name="content">
+                         <div class="related">
+                               <xsl:call-template name="CreateRelatedSection">
+                                 <xsl:with-param name="section" select="'Articles'" />
+                                 <xsl:with-param name="type" select="'article'" />
+                               </xsl:call-template>
+                               <xsl:call-template name="CreateRelatedSection">
+                                 <xsl:with-param name="section" select="'Available Samples'" />
+                                 <xsl:with-param name="type" select="'sample'" />
+                               </xsl:call-template>
+                               <xsl:call-template name="CreateRelatedSection">
+                                 <xsl:with-param name="section" select="'Related specifications'" />
+                                 <xsl:with-param name="type" select="'specification'" />
+                               </xsl:call-template>
+                               <xsl:call-template name="CreateRelatedSection">
+                                 <xsl:with-param name="section" select="'External Documentation'" />
+                                 <xsl:with-param name="type" select="'externalDocumentation'" />
+                               </xsl:call-template>
+                         </div>
+                       </xsl:with-param>
+                 </xsl:call-template>
+               </xsl:if>
+
+               <xsl:call-template name="CreateH2Section">
+                       <xsl:with-param name="name" select="'Requirements'"/>
+                       <xsl:with-param name="child-id" select="concat ($linkid, ':Version Information')" />
+                       <xsl:with-param name="content">
+                               <xsl:call-template name="CreatePlatformRequirements" />
+                               <b>Namespace: </b><xsl:value-of select="substring(/Type/@FullName, 1, string-length(/Type/@FullName) - string-length(/Type/@Name) - 1)" />
+                               <xsl:if test="count(/Type/AssemblyInfo/AssemblyName) &gt; 0">
+                                       <br />
+                                       <b>Assembly: </b>
+                                       <xsl:value-of select="/Type/AssemblyInfo/AssemblyName" />
+                                       <xsl:text> (in </xsl:text>
+                                       <xsl:value-of select="/Type/AssemblyInfo/AssemblyName" />
+                                       <xsl:text>.dll)</xsl:text>
+                               </xsl:if>
+                               <xsl:if test="count(AssemblyInfo/AssemblyVersion) &gt; 0">
+                                       <br />
+                                       <b>Assembly Versions: </b>
+                                       <xsl:for-each select="AssemblyInfo/AssemblyVersion">
+                                               <xsl:if test="not(position()=1)">, </xsl:if>
+                                                       <xsl:value-of select="."/>
+                                       </xsl:for-each>
+                               </xsl:if>
+                               <xsl:if test="count(Docs/since) &gt; 0">
+                                       <br />
+                                       <b>Since: </b>
+                                       <xsl:for-each select="Docs/since">
+                                               <xsl:if test="not(position()=1)">; </xsl:if>
+                                                       <xsl:value-of select="@version"/>
+                                       </xsl:for-each>
+                               </xsl:if>
+                               <xsl:if test="count(Docs/since)=0 and count(/Type/Docs/since) &gt; 0">
+                                       <br />
+                                       <b>Since: </b>
+                                       <xsl:for-each select="/Type/Docs/since">
+                                               <xsl:if test="not(position()=1)">; </xsl:if>
+                                                       <xsl:value-of select="@version"/>
+                                       </xsl:for-each>
+                               </xsl:if>
+                       </xsl:with-param>
+               </xsl:call-template>
+       </xsl:template>
+
+       
+       <!-- Transforms the contents of the selected node into a hyperlink to the type named by the node. The node can contain a type name (e.g. System.Object) or a type link (e.g. T:System.String). Use the wrt parameter to specify the current namespace. -->
+
+       <xsl:template match="*|@*" mode="typelink">
+               <xsl:param name="wrt" select="'notset'"/>
+               
+               <xsl:call-template name="maketypelink">
+                               <xsl:with-param name="type" select="."/>
+                               <xsl:with-param name="wrt" select="$wrt"/>
+               </xsl:call-template>
+       </xsl:template>
+
+       <xsl:template name="makenamespacelink">
+               <xsl:param name="cref" select="''"/>
+
+               <a>
+                       <xsl:attribute name="href">
+                               <xsl:call-template name="GetLinkTargetHtml">
+                                       <xsl:with-param name="cref" select="$cref" />
+                               </xsl:call-template>
+                       </xsl:attribute>
+       
+                       <xsl:value-of select="substring-after ($cref, 'N:')" />
+               </a>
+       </xsl:template>
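+
+       <!--
+       Example: cref "N:System.IO" links to the target computed by
+       GetLinkTargetHtml and displays the part after the "N:" prefix, "System.IO".
+       -->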
+
+       <xsl:template name="maketypelink">
+               <xsl:param name="type" select="'notset'"/>
+               <xsl:param name="wrt" select="'notset'"/>
+               <xsl:param name="nested" select="0"/>
+
+               <xsl:variable name="btype">
+                       <xsl:call-template name="ToBrackets">
+                               <xsl:with-param name="s" select="$type" />
+                       </xsl:call-template>
+               </xsl:variable>
+
+               <xsl:variable name="array">
+                       <xsl:call-template name="GetArraySuffix">
+                               <xsl:with-param name="type" select="$type" />
+                       </xsl:call-template>
+               </xsl:variable>
+               
+               <xsl:choose>
+
+               <!-- chop off T: -->
+               <xsl:when test="starts-with($type, 'T:')">
+                       <xsl:call-template name="maketypelink">
+                               <xsl:with-param name="type" select="substring($type, 3)"/>
+                               <xsl:with-param name="wrt" select="$wrt"/>
+                       </xsl:call-template>
+               </xsl:when>
+
+               <xsl:when test="contains ($type, '&amp;') and 
+                               '&amp;' = substring ($type, string-length ($type), 1)">
+                       <xsl:call-template name="maketypelink">
+                               <xsl:with-param name="type" select="substring($type, 1, string-length($type)-1)"/>
+                               <xsl:with-param name="wrt" select="$wrt"/>
+                       </xsl:call-template>
+               </xsl:when>
+
+               <xsl:when test="string($array)">
+                       <xsl:call-template name="maketypelink">
+                               <xsl:with-param name="type" select="substring($type, 1, string-length($type) - string-length ($array))"/>
+                               <xsl:with-param name="wrt" select="$wrt"/>
+                       </xsl:call-template>
+                       <xsl:value-of select="$array"/>
+               </xsl:when>
+
+               <xsl:when test="contains ($type, '*') and
+                               '*' = substring ($type, string-length ($type), 1)">
+                       <xsl:call-template name="maketypelink">
+                               <xsl:with-param name="type" select="substring($type, 1, string-length($type)-1)"/>
+                               <xsl:with-param name="wrt" select="$wrt"/>
+                       </xsl:call-template>
+                       <xsl:value-of select="'*'"/>
+               </xsl:when>
+               
+               <!-- if this is a generic type parameter, don't make a link but italicize it and give it a tooltip instead -->
+               <xsl:when test="count($ThisType/TypeParameters/TypeParameter[@Name=$type] | 
+                               $ThisType/TypeParameters/TypeParameter[child::text()=$type] |
+                               ancestor::Member/Docs/typeparam[@name=$type]) = 1">
+                       <!-- Note: we check /Type/TypeParameters (rather than just this member) because, for nested classes, it also carries type parameters declared on the outer class; the tooltip text itself comes from the typeparam docs in this file. -->
+                       <i title="{$ThisType/Docs/typeparam[@name=$type] | ancestor::Member/Docs/typeparam[@name=$type]}"><xsl:value-of select="$type"/></i>
+               </xsl:when>
+               
+               <!-- if this is a generic type parameter of a base type, replace it with the type that it was instantiated with -->
+               <xsl:when test="count(ancestor::Members/BaseTypeArgument[@TypeParamName=$type]) = 1">
+                       <!-- Note: $type may itself embed an overridden type parameter, but we cannot substitute that cleanly since generic type names are not parsed here. -->
+                       <xsl:call-template name="maketypelink">
+                               <xsl:with-param name="type" select="ancestor::Members/BaseTypeArgument[@TypeParamName=$type]"/>
+                               <xsl:with-param name="wrt" select="$wrt"/>
+                       </xsl:call-template>
+               </xsl:when>
+               
+
+               <xsl:otherwise>
+                       <xsl:variable name="escaped-type">
+                               <xsl:call-template name="GetEscapedTypeName">
+                                       <xsl:with-param name="typename" select="$btype" />
+                               </xsl:call-template>
+                       </xsl:variable>
+                       <a>
+                               <xsl:attribute name="href">
+                                       <xsl:call-template name="GetLinkTargetHtml">
+                                               <xsl:with-param name="type" select="$escaped-type" />
+                                               <xsl:with-param name="cref" select="concat ('T:', $escaped-type)" />
+                                       </xsl:call-template>
+                               </xsl:attribute>
+       
+                               <xsl:call-template name="GetTypeDisplayName">
+                                       <xsl:with-param name="T" select="$btype"/>
+                                       <xsl:with-param name="wrt" select="$wrt"/>
+                               </xsl:call-template>
+                       </a>
+               </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
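+
+       <!--
+       Worked examples of the recursion above: "T:System.String" drops the "T:"
+       prefix and links to System.String; "System.Int32&" (a by-reference type)
+       links to plain System.Int32; "System.Char*" links to System.Char and
+       re-appends the "*"; array suffixes are likewise stripped, linked, then
+       re-appended.
+       -->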
+
+       <xsl:template name="GetArraySuffix">
+               <xsl:param name="type" />
+
+               <xsl:if test="contains ($type, ']') and 
+                               ']' = substring ($type, string-length ($type), 1)">
+                       <xsl:variable name="start">
+                               <xsl:call-template name="GetArraySuffixStart">
+                                       <xsl:with-param name="type" select="$type" />
+                                       <xsl:with-param name="i" select="string-length ($type) - 1" />
+                               </xsl:call-template>
+                       </xsl:variable>
+                       <xsl:value-of select="substring ($type, $start)" />
+               </xsl:if>
+       </xsl:template>
+
+       <xsl:template name="GetArraySuffixStart">
+               <xsl:param name="type" />
+               <xsl:param name="i" />
+
+               <xsl:choose>
+                       <xsl:when test="substring ($type, $i, 1) = '['">
+                               <xsl:value-of select="$i" />
+                       </xsl:when>
+                       <xsl:otherwise>
+                               <xsl:call-template name="GetArraySuffixStart">
+                                       <xsl:with-param name="type" select="$type" />
+                                       <xsl:with-param name="i" select="$i - 1" />
+                               </xsl:call-template>
+                       </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
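+
+       <!-- Worked example for the two templates above (informal): for
+            $type = 'System.Int32[,]' the scan starts at the trailing ']' and walks
+            backwards to the matching '[', so GetArraySuffix yields '[,]'. Only the
+            final bracket group is returned: 'System.Int32[][]' yields just '[]'. -->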
+
+       <xsl:template name="GetTypeDisplayName">
+               <xsl:param name="T"/>
+               <xsl:param name="wrt"/>
+
+                               <!-- use C#-style names -->
+                               <xsl:choose>
+                                       <xsl:when test="$T='System.Object'">object</xsl:when>
+                                       <xsl:when test="$T='System.Boolean'">bool</xsl:when>
+                                       <xsl:when test="$T='System.Byte'">byte</xsl:when>
+                                       <xsl:when test="$T='System.Char'">char</xsl:when>
+                                       <xsl:when test="$T='System.Decimal'">decimal</xsl:when>
+                                       <xsl:when test="$T='System.Double'">double</xsl:when>
+                                       <xsl:when test="$T='System.Int16'">short</xsl:when>
+                                       <xsl:when test="$T='System.Int32'">int</xsl:when>
+                                       <xsl:when test="$T='System.Int64'">long</xsl:when>
+                                       <xsl:when test="$T='System.SByte'">sbyte</xsl:when>
+                                       <xsl:when test="$T='System.Single'">float</xsl:when>
+                                       <xsl:when test="$T='System.String'">string</xsl:when>
+                                       <xsl:when test="$T='System.UInt16'">ushort</xsl:when>
+                                       <xsl:when test="$T='System.UInt32'">uint</xsl:when>
+                                       <xsl:when test="$T='System.UInt64'">ulong</xsl:when>
+                                       <xsl:when test="$T='System.Void'">void</xsl:when>
+
+                                       <xsl:when test="contains($T, '&lt;')">
+                                               <xsl:call-template name="GetTypeDisplayName">
+                                                       <xsl:with-param name="T" select="substring-before ($T, '&lt;')" />
+                                                       <xsl:with-param name="wrt" select="$wrt" />
+                                               </xsl:call-template>
+                                               <xsl:text>&lt;</xsl:text>
+                                               <xsl:call-template name="GetMemberArgList">
+                                                       <xsl:with-param name="arglist" select="substring-after ($T, '&lt;')" />
+                                                       <xsl:with-param name="wrt" select="$wrt" />
+                                               </xsl:call-template>
+                                               <!-- don't need to append &gt; as GetMemberArgList (eventually) appends it -->
+                                       </xsl:when>
+       
+                                       <!-- if the type is in the wrt namespace, omit the namespace name -->
+                                       <xsl:when test="not($wrt='') and starts-with($T, concat($wrt,'.')) and not(contains(substring-after($T,concat($wrt,'.')), '.'))">
+                                               <xsl:value-of select="translate (substring-after($T,concat($wrt,'.')), '+', '.')"/>
+                                       </xsl:when>
+       
+                                       <!-- if the type is in the System namespace, omit the namespace name -->
+                                       <xsl:when test="starts-with($T, 'System.') and not(contains(substring-after($T, 'System.'), '.'))">
+                                               <xsl:value-of select="translate (substring-after($T,'System.'), '+', '.')"/>
+                                       </xsl:when>
+       
+                                       <!-- if the type is in the System.Collections namespace, omit the namespace name -->
+                                       <xsl:when test="starts-with($T, 'System.Collections.') and not(contains(substring-after($T, 'System.Collections.'), '.'))">
+                                               <xsl:value-of select="translate (substring-after($T,'System.Collections.'), '+', '.')"/>
+                                       </xsl:when>
+
+                                       <!-- if the type is in the System.Collections.Generic namespace, omit the namespace name -->
+                                       <xsl:when test="starts-with($T, 'System.Collections.Generic.') and not(contains(substring-after($T, 'System.Collections.Generic.'), '.'))">
+                                               <xsl:value-of select="translate (substring-after($T,'System.Collections.Generic.'), '+', '.')"/>
+                                       </xsl:when>
+
+                                       <xsl:otherwise>
+                                               <xsl:value-of select="translate ($T, '+', '.')" />
+                                       </xsl:otherwise>
+                               </xsl:choose>
+       </xsl:template>
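+
+       <!-- e.g. GetTypeDisplayName maps 'System.Int32' to 'int' and
+            'System.Collections.Generic.List<System.String>' to 'List<string>';
+            namespace prefixes are dropped for $wrt, System and the two collections
+            namespaces, and nested-type '+' separators are shown as '.'. -->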
+
+       <xsl:template name="GetMemberDisplayName">
+               <xsl:param name="memberName" />
+               <xsl:param name="isproperty" select="false()" />
+
+               <xsl:choose>
+                       <xsl:when test="contains($memberName, '.')">
+                               <xsl:call-template name="GetTypeDisplayName">
+                                       <xsl:with-param name="T">
+                                               <xsl:call-template name="GetTypeName">
+                                                       <xsl:with-param name="type" select="$memberName"/>
+                                               </xsl:call-template>
+                                       </xsl:with-param>
+                                       <xsl:with-param name="wrt" select="''" />
+                               </xsl:call-template>
+                               <xsl:text>.</xsl:text>
+                               <xsl:call-template name="GetMemberName">
+                                       <xsl:with-param name="type" select="$memberName" />
+                                       <xsl:with-param name="isproperty" select="$isproperty"/>
+                               </xsl:call-template>
+                       </xsl:when>
+                       <xsl:otherwise>
+                               <xsl:value-of select="$memberName" />
+                       </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
+
+       <xsl:template name="ToBrackets">
+               <xsl:param name="s" />
+               <xsl:value-of select="translate (translate ($s, '{', '&lt;'), '}', '&gt;')" />
+       </xsl:template>
+
+       <xsl:template name="ToBraces">
+               <xsl:param name="s" />
+               <xsl:value-of select="translate (translate ($s, '&lt;', '{'), '&gt;', '}')" />
+       </xsl:template>
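+
+       <!-- ToBrackets/ToBraces convert between the brace form used inside cref
+            attribute values and C# angle brackets; e.g. ToBrackets('IList{T}')
+            gives 'IList<T>' and ToBraces is the inverse. -->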
+       
+       <xsl:template name="memberlinkprefix">
+               <xsl:param name="member" />
+               <xsl:choose>
+                       <xsl:when test="$member/MemberType='Constructor'">C</xsl:when>
+                       <xsl:when test="$member/MemberType='Method'">M</xsl:when>
+                       <xsl:when test="$member/MemberType='Property'">P</xsl:when>
+                       <xsl:when test="$member/MemberType='Field'">F</xsl:when>
+                       <xsl:when test="$member/MemberType='Event'">E</xsl:when>
+               </xsl:choose>
+       </xsl:template>
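+
+       <!-- The letter mirrors the cref prefix convention: C: constructor, M: method,
+            P: property, F: field, E: event. -->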
+
+       <xsl:template name="makememberlink">
+               <xsl:param name="cref"/>
+
+               <xsl:variable name="bcref">
+                       <xsl:call-template name="ToBrackets">
+                               <xsl:with-param name="s" select="$cref" />
+                       </xsl:call-template>
+               </xsl:variable>
+
+               <xsl:variable name="fullname">
+                       <xsl:choose>
+                               <xsl:when test="starts-with($bcref, 'C:') or starts-with($bcref, 'T:')">
+                                       <xsl:choose>
+                                               <xsl:when test="contains($bcref, '(')">
+                                                       <xsl:value-of select="substring (substring-before ($bcref, '('), 3)" />
+                                               </xsl:when>
+                                               <xsl:otherwise>
+                                                       <xsl:value-of select="substring($bcref, 3)" />
+                                               </xsl:otherwise>
+                                       </xsl:choose>
+                               </xsl:when>
+                               <xsl:otherwise>
+                                       <xsl:call-template name="GetTypeName">
+                                               <xsl:with-param name="type" select="substring($bcref, 3)"/>
+                                       </xsl:call-template>
+                               </xsl:otherwise>
+                       </xsl:choose>
+               </xsl:variable>
+
+               <xsl:variable name="memberName">
+                       <xsl:choose>
+                               <xsl:when test="starts-with($bcref, 'T:')" />
+                               <xsl:when test="starts-with($bcref, 'C:') and not(contains($bcref, '('))" />
+                               <xsl:when test="starts-with($bcref, 'C:') and contains($bcref, '(')">
+                                       <xsl:text>(</xsl:text>
+                                       <xsl:call-template name="GetMemberArgList">
+                                               <xsl:with-param name="arglist" select="substring-before(substring-after($bcref, '('), ')')" />
+                                               <xsl:with-param name="wrt" select="$TypeNamespace" />
+                                       </xsl:call-template>
+                                       <xsl:text>)</xsl:text>
+                               </xsl:when>
+                               <xsl:otherwise>
+                                       <xsl:text>.</xsl:text>
+                                       <xsl:call-template name="GetMemberName">
+                                               <xsl:with-param name="type" select="substring($bcref, 3)" />
+                                               <xsl:with-param name="wrt" select="$fullname"/>
+                                               <xsl:with-param name="isproperty" select="starts-with($bcref, 'P:')"/>
+                                       </xsl:call-template>
+                               </xsl:otherwise>
+                       </xsl:choose>
+               </xsl:variable>
+
+               <xsl:variable name="escaped-type">
+                       <xsl:call-template name="GetEscapedTypeName">
+                               <xsl:with-param name="typename">
+                                       <xsl:call-template name="ToBrackets">
+                                               <xsl:with-param name="s" select="$fullname" />
+                                       </xsl:call-template>
+                               </xsl:with-param>
+                       </xsl:call-template>
+               </xsl:variable>
+               <xsl:variable name="displayname">
+                       <xsl:call-template name="GetTypeDisplayName">
+                               <xsl:with-param name="T" select="$fullname" />
+                               <xsl:with-param name="wrt" select="$TypeNamespace"/>
+                       </xsl:call-template>
+               </xsl:variable>
+               <a>
+                       <xsl:attribute name="href">
+                               <xsl:call-template name="GetLinkTargetHtml">
+                                       <xsl:with-param name="type" select="$escaped-type" />
+                                       <xsl:with-param name="cref">
+                                               <xsl:call-template name="ToBraces">
+                                                       <xsl:with-param name="s" select="$cref" />
+                                               </xsl:call-template>
+                                       </xsl:with-param>
+                               </xsl:call-template>
+                       </xsl:attribute>
+                       <xsl:value-of select="translate (concat($displayname, $memberName), '+', '.')" />
+               </a>
+       </xsl:template>
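+
+       <!-- Worked example for makememberlink (informal): for the cref
+            'M:System.String.Concat(System.String,System.String)', $fullname resolves
+            to 'System.String' and $memberName to '.Concat(string, string)', so the
+            rendered link text is 'String.Concat(string, string)'. -->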
+
+       <xsl:template name="GetTypeName">
+               <xsl:param name="type" />
+               <xsl:variable name="prefix" select="substring-before($type, '.')" />
+               <xsl:variable name="suffix" select="substring-after($type, '.')" />
+               <xsl:choose>
+                       <xsl:when test="contains($type, '(')">
+                               <xsl:call-template name="GetTypeName">
+                                       <xsl:with-param name="type" select="substring-before($type, '(')" />
+                               </xsl:call-template>
+                       </xsl:when>
+                       <xsl:when test="not(contains($suffix, '.'))">
+                               <xsl:value-of select="$prefix" />
+                       </xsl:when>
+                       <xsl:otherwise>
+                               <xsl:value-of select="$prefix" />
+                               <xsl:text>.</xsl:text>
+                               <xsl:call-template name="GetTypeName">
+                                       <xsl:with-param name="type" select="$suffix" />
+                               </xsl:call-template>
+                       </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
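+
+       <!-- e.g. GetTypeName('System.String.Length') returns 'System.String'; the last
+            dotted component is assumed to be a member name and is dropped. -->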
+
+       <xsl:template name="GetMemberName">
+               <xsl:param name="type" />
+               <xsl:param name="isproperty" select="false()"/>
+               <xsl:variable name="prefix" select="substring-before($type, '.')" />
+               <xsl:variable name="suffix" select="substring-after($type, '.')" />
+               <xsl:choose>
+                       <xsl:when test="contains($type, '(')">
+                               <xsl:call-template name="GetMemberName">
+                                       <xsl:with-param name="type" select="substring-before($type, '(')" />
+                               </xsl:call-template>
+                               <xsl:text>(</xsl:text>
+                               <xsl:call-template name="GetMemberArgList">
+                                       <xsl:with-param name="arglist" select="substring-before(substring-after($type, '('), ')')" />
+                                       <xsl:with-param name="wrt" select="$TypeNamespace" />
+                               </xsl:call-template>
+                               <xsl:text>)</xsl:text>
+                       </xsl:when>
+                       <xsl:when test="not(contains($suffix, '.'))">
+                               <xsl:value-of select="$suffix" />
+                       </xsl:when>
+                       <xsl:otherwise>
+                               <xsl:call-template name="GetMemberName">
+                                       <xsl:with-param name="type" select="$suffix" />
+                               </xsl:call-template>
+                       </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
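+
+       <!-- e.g. GetMemberName('System.String.Length') returns 'Length'; for methods
+            the argument list is re-rendered through GetMemberArgList, so
+            'System.String.Concat(System.String,System.String)' becomes
+            'Concat(string, string)'. -->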
+
+       <xsl:template name="GetMemberArgList">
+               <xsl:param name="arglist" />
+               <xsl:param name="wrt" select="''"/>
+
+               <xsl:variable name="_arglist">
+                       <xsl:choose>
+                               <xsl:when test="starts-with ($arglist, ',')">
+                                       <xsl:value-of select="substring-after ($arglist, ',')" />
+                               </xsl:when>
+                               <xsl:otherwise>
+                                       <xsl:value-of select="$arglist" />
+                               </xsl:otherwise>
+                       </xsl:choose>
+               </xsl:variable>
+
+               <xsl:if test="starts-with ($arglist, ',')">
+                       <xsl:text>, </xsl:text>
+               </xsl:if>
+
+               <xsl:variable name="c"  select="substring-before ($_arglist, ',')" />
+               <xsl:variable name="lt" select="substring-before ($_arglist, '&lt;')" />
+               <xsl:variable name="gt" select="substring-before ($_arglist, '&gt;')" />
+
+               <xsl:choose>
+                       <!-- Need to insert ',' between type arguments -->
+                       <xsl:when test="
+                                       ($c != '' and $lt != '' and $gt != '' and 
+                                        string-length ($c) &lt; string-length ($lt) and 
+                                        string-length ($c) &lt; string-length ($gt)) or
+                                       ($c != '' and $lt != '' and $gt = '' and
+                                        string-length ($c) &lt; string-length ($lt)) or
+                                       ($c != '' and $lt = '' and $gt != '' and
+                                        string-length ($c) &lt; string-length ($gt)) or
+                                       ($c != '' and $lt = '' and $gt = '')">
+                               <xsl:call-template name="GetTypeDisplayName">
+                                       <xsl:with-param name="T" select="$c"/>
+                                       <xsl:with-param name="wrt" select="$wrt"/>
+                               </xsl:call-template>
+                               <xsl:text>, </xsl:text>
+                               <xsl:call-template name="GetMemberArgList">
+                                       <xsl:with-param name="arglist" select="substring-after($_arglist, ',')" />
+                                       <xsl:with-param name="wrt" select="$wrt" />
+                               </xsl:call-template>
+                       </xsl:when>
+
+                       <!-- start of nested type argument list < -->
+                       <xsl:when test="
+                                       ($c != '' and $lt != '' and $gt != '' and 
+                                        string-length ($lt) &lt; string-length ($c) and 
+                                        string-length ($lt) &lt; string-length ($gt)) or
+                                       ($c != '' and $lt != '' and $gt = '' and
+                                        string-length ($lt) &lt; string-length ($c)) or
+                                       ($c = '' and $lt != '' and $gt != '' and
+                                        string-length ($lt) &lt; string-length ($gt))">
+                               <xsl:call-template name="GetTypeDisplayName">
+                                       <xsl:with-param name="T" select="$lt"/>
+                                       <xsl:with-param name="wrt" select="$wrt"/>
+                               </xsl:call-template>
+                               <xsl:text>&lt;</xsl:text>
+                               <xsl:call-template name="GetMemberArgList">
+                                       <xsl:with-param name="arglist" select="substring-after($_arglist, '&lt;')" />
+                                       <xsl:with-param name="wrt" select="$wrt" />
+                               </xsl:call-template>
+                       </xsl:when>
+
+                       <!-- end of (nested?) type argument list > -->
+                       <xsl:when test="
+                                       ($c != '' and $lt != '' and $gt != '' and 
+                                        string-length ($gt) &lt; string-length ($c) and 
+                                        string-length ($gt) &lt; string-length ($lt)) or
+                                       ($c != '' and $lt = '' and $gt = '' and
+                                        string-length ($gt) &lt; string-length ($c)) or
+                                       ($c = '' and $lt != '' and $gt != '' and
+                                        string-length ($gt) &lt; string-length ($lt)) or
+                                       ($c = '' and $lt = '' and $gt != '')">
+                               <xsl:call-template name="GetTypeDisplayName">
+                                       <xsl:with-param name="T" select="$gt"/>
+                                       <xsl:with-param name="wrt" select="$wrt"/>
+                               </xsl:call-template>
+                               <xsl:text>&gt;</xsl:text>
+                               <xsl:call-template name="GetMemberArgList">
+                                       <xsl:with-param name="arglist" select="substring-after($_arglist, '&gt;')" />
+                                       <xsl:with-param name="wrt" select="$wrt" />
+                               </xsl:call-template>
+                       </xsl:when>
+
+                       <!-- nothing left to do -->
+                       <xsl:otherwise>
+                               <xsl:call-template name="GetTypeDisplayName">
+                                       <xsl:with-param name="T" select="$_arglist"/>
+                                       <xsl:with-param name="wrt" select="$wrt"/>
+                               </xsl:call-template>
+                       </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
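+
+       <!-- The guarded branches above emulate a 'which delimiter comes first' test,
+            since XPath 1.0 has no min(): each branch compares the lengths of
+            substring-before for ',', '<' and '>' to decide whether the next token
+            ends an argument, opens a nested argument list, or closes one. E.g.
+            'System.Int32,List<System.String>' is emitted as 'int', ', ', 'List',
+            '<', 'string', '>'. -->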
+       
+       <!-- Transforms the contents of the selected node containing a cref into a hyperlink. -->
+       <xsl:template match="*|@*" mode="cref">
+               <xsl:call-template name="makememberlink">
+                       <xsl:with-param name="cref" select="."/>
+               </xsl:call-template>
+               <!--
+               <a>
+                       <xsl:attribute name="href"><xsl:value-of select="."/></xsl:attribute>
+                       <xsl:value-of select="substring-after(., ':')"/></a>
+                       -->
+       </xsl:template>
+
+       <xsl:template name="membertypeplural">
+               <xsl:param name="name"/>
+               <xsl:choose>
+               <xsl:when test="$name='ExtensionMethod'">Extension Methods</xsl:when>
+               <xsl:when test="$name='Constructor'">Constructors</xsl:when>
+               <xsl:when test="$name='Property'">Properties</xsl:when>
+               <xsl:when test="$name='Method'">Methods</xsl:when>
+               <xsl:when test="$name='Field'">Fields</xsl:when>
+               <xsl:when test="$name='Event'">Events</xsl:when>
+               <xsl:when test="$name='Operator'">Operators</xsl:when>
+               <xsl:when test="$name='Explicit'">Explicitly Implemented Interface Members</xsl:when>
+               </xsl:choose>
+       </xsl:template>
+       <xsl:template name="membertypeplurallc">
+               <xsl:param name="name"/>
+               <xsl:choose>
+               <xsl:when test="$name='ExtensionMethod'">extension methods</xsl:when>
+               <xsl:when test="$name='Constructor'">constructors</xsl:when>
+               <xsl:when test="$name='Property'">properties</xsl:when>
+               <xsl:when test="$name='Method'">methods</xsl:when>
+               <xsl:when test="$name='Field'">fields</xsl:when>
+               <xsl:when test="$name='Event'">events</xsl:when>
+               <xsl:when test="$name='Operator'">operators</xsl:when>
+               <xsl:when test="$name='Explicit'">explicitly implemented interface members</xsl:when>
+               </xsl:choose>
+       </xsl:template>
+       <xsl:template name="gettypetype">
+               <xsl:variable name="sig" select="concat(' ', TypeSignature[@Language='C#']/@Value, ' ')"/>
+               <xsl:choose>
+               <xsl:when test="contains($sig,'class')">Class</xsl:when>
+               <xsl:when test="contains($sig,'enum')">Enumeration</xsl:when>
+               <xsl:when test="contains($sig,'struct')">Structure</xsl:when>
+               <xsl:when test="contains($sig,'delegate')">Delegate</xsl:when>
+               </xsl:choose>
+       </xsl:template>
+
+       <!-- Ensures that the resulting node is not surrounded by a para tag. -->
+       <xsl:template match="*|@*" mode="editlink">
+               <xsl:call-template name="CreateEditLink">
+                       <xsl:with-param name="e" select="." />
+               </xsl:call-template>
+       </xsl:template>
+
+       <xsl:template match="*" mode="notoppara">
+               <xsl:choose>
+               <xsl:when test="starts-with (string(.), 'To be added')">
+                       <span class="NotEntered">Documentation for this section has not yet been entered.</span>
+               </xsl:when>
+               <xsl:when test="count(*) = 1 and count(para)=1">
+                       <xsl:apply-templates select="para/node()"/>
+               </xsl:when>
+               <xsl:otherwise>
+                       <xsl:apply-templates select="."/>
+               </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
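+
+       <!-- notoppara unwraps a Docs node that consists of a single <para> so short
+            summaries can sit inline in tables, and replaces the 'To be added' stub
+            text with a styled notice. -->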
+
+       <xsl:template match="para">
+               <p>
+                       <xsl:apply-templates/>
+               </p>
+       </xsl:template>
+
+       <xsl:template match="paramref">
+               <i><xsl:value-of select="@name"/><xsl:apply-templates/></i>
+       </xsl:template>
+
+       <xsl:template match="typeparamref">
+               <i><xsl:value-of select="@name"/><xsl:apply-templates/></i>
+       </xsl:template>
+
+       <xsl:template match="block[@type='note']">
+               <div>
+               <i>Note: </i>
+                               <xsl:apply-templates/>
+               </div>
+       </xsl:template>
+       <xsl:template match="block[@type='behaviors']">
+               <h5 class="Subsection">Operation</h5>
+               <xsl:apply-templates/>
+       </xsl:template>
+       <xsl:template match="block[@type='overrides']">
+               <h5 class="Subsection">Note to Inheritors</h5>
+               <xsl:apply-templates/>
+       </xsl:template>
+       <xsl:template match="block[@type='usage']">
+               <h5 class="Subsection">Usage</h5>
+               <xsl:apply-templates/>
+       </xsl:template>
+
+       <xsl:template match="c">
+               <tt>
+                       <xsl:apply-templates/>  
+               </tt>
+       </xsl:template>
+       <xsl:template match="c//para">
+               <xsl:apply-templates/><br/>     
+       </xsl:template>
+       
+       <xsl:template match="code">
+               <xsl:call-template name="CreateCodeBlock">
+                       <xsl:with-param name="language" select="@lang" />
+                       <xsl:with-param name="content" select="string(descendant-or-self::text())" />
+               </xsl:call-template>
+       </xsl:template>
+       <xsl:template match="img">
+         <p>
+               <img src="source-id:{$source-id}:{@href}">
+                 <xsl:attribute name="class">
+                       <xsl:choose>
+                         <xsl:when test="count(@class)&gt;0">
+                               <xsl:value-of select="@class" />
+                         </xsl:when>
+                         <xsl:otherwise>picture</xsl:otherwise>
+                       </xsl:choose>
+                 </xsl:attribute>
+               </img>
+         </p>
+       </xsl:template>
+
+       <xsl:template match="onequarter">¼</xsl:template>
+       <xsl:template match="pi">π</xsl:template>
+       <xsl:template match="theta">θ</xsl:template>
+       <xsl:template match="leq">≤</xsl:template>
+       <xsl:template match="geq">≥</xsl:template>
+       <xsl:template match="subscript">
+               <sub><xsl:value-of select="@term"/></sub>
+       </xsl:template>
+       <xsl:template match="superscript">
+               <sup><xsl:value-of select="@term"/></sup>
+       </xsl:template>
+
+       <!-- tabular data
+               example:
+
+                       <list type="table">
+                               <listheader>
+                                       <term>First Col Header</term>
+                                       <description>Second Col Header</description>
+                                       <description>Third Col Header</description>
+                               </listheader>
+                               <item>
+                                       <term>First Row First Col</term>
+                                       <description>First Row Second Col</description>
+                                       <description>First Row Third Col</description>
+                               </item>
+                               <item>
+                                       <term>Second Row First Col</term>
+                                       <description>Second Row Second Col</description>
+                                       <description>Second Row Third Col</description>
+                               </item>
+                       </list>
+       -->
+
+       <xsl:template match="list[@type='table']">
+               <xsl:call-template name="CreateListTable">
+               <xsl:with-param name="header">
+                       <th><xsl:apply-templates select="listheader/term" mode="notoppara"/></th>
+                       <xsl:for-each select="listheader/description">
+                               <th><xsl:apply-templates mode="notoppara"/></th>
+                       </xsl:for-each>
+               </xsl:with-param>
+
+               <xsl:with-param name="content">
+               <xsl:for-each select="item">
+                       <tr valign="top">
+                       <td>
+                               <xsl:apply-templates select="term" mode="notoppara"/>
+                       </td>
+                       <xsl:for-each select="description">
+                               <td>
+                                       <xsl:apply-templates mode="notoppara"/>
+                               </td>
+                       </xsl:for-each>
+                       </tr>
+               </xsl:for-each>
+               </xsl:with-param>
+               </xsl:call-template>
+       </xsl:template>
+
+       <xsl:template match="list[@type='bullet']">
+               <ul>
+                       <xsl:for-each select="item">
+                               <li>
+                                       <xsl:apply-templates select="term" mode="notoppara"/>
+                               </li>
+                       </xsl:for-each>
+               </ul>
+       </xsl:template>
+       <xsl:template match="list[@type='number']">
+               <ol>
+                       <xsl:for-each select="item">
+                               <li>
+                                       <xsl:apply-templates select="term" mode="notoppara"/>
+                               </li>
+                       </xsl:for-each>
+               </ol>
+       </xsl:template>
+
+       <xsl:template match="list">
+               [<i>The '<xsl:value-of select="@type"/>' type of list has not been implemented in the ECMA stylesheet.</i>]
+               
+               <xsl:message>
+               [<i>The '<xsl:value-of select="@type"/>' type of list has not been implemented in the ECMA stylesheet.</i>]
+               </xsl:message>
+       </xsl:template>
+
+       <xsl:template match="see[@cref]">
+               <xsl:choose>
+               <xsl:when test="not(substring-after(@cref, 'T:')='')">
+                       <xsl:call-template name="maketypelink">
+                               <xsl:with-param name="type" select="normalize-space (@cref)"/>
+                       </xsl:call-template>
+               </xsl:when>
+               <xsl:when test="not(substring-after(@cref, 'N:')='')">
+                       <xsl:call-template name="makenamespacelink">
+                               <xsl:with-param name="cref" select="normalize-space (@cref)"/>
+                       </xsl:call-template>
+               </xsl:when>
+               <xsl:otherwise>
+                       <xsl:call-template name="makememberlink">
+                               <xsl:with-param name="cref" select="normalize-space (@cref)"/>
+                       </xsl:call-template>
+               </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
+
+       <xsl:template match="see[@langword]">
+               <tt><xsl:value-of select="@langword"/></tt>
+       </xsl:template>
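+
+       <!-- e.g. <see langword="null" /> renders as monospaced 'null' with no link. -->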
+       
+       <xsl:template name="GetInheritedMembers">
+               <xsl:param name="declaringtype"/>
+               <xsl:param name="generictypereplacements"/>
+               <xsl:param name="listmembertype"/>
+               <xsl:param name="showprotected"/>
+               <xsl:param name="overloads-mode" select="false()" />
+               <xsl:param name="showstatic" select='1'/>
+
+               <xsl:choose>
+               <xsl:when test="$listmembertype='ExtensionMethod' and $showprotected=false()">
+                       <xsl:for-each select="$declaringtype/Members/Member[MemberType=$listmembertype]">
+                               <Members Name="{Link/@Type}" FullName="{Link/@Type}">
+                                       <Member MemberName="{@MemberName}">
+                                               <xsl:attribute name="ExplicitMemberName">
+                                                       <xsl:call-template name="GetMemberNameWithoutGenericTypes">
+                                                               <xsl:with-param name="m" select="@MemberName" />
+                                                       </xsl:call-template>
+                                               </xsl:attribute>
+                                               <xsl:attribute name="TypeParameters">
+                                                       <xsl:call-template name="GetTypeParameterNames">
+                                                               <xsl:with-param name="member" select="." />
+                                                       </xsl:call-template>
+                                               </xsl:attribute>
+                                               <xsl:attribute name="Parameters">
+                                                       <xsl:call-template name="GetParameterTypes">
+                                                               <xsl:with-param name="member" select="." />
+                                                       </xsl:call-template>
+                                               </xsl:attribute>
+                                               <xsl:copy-of select="./*" />
+                                       </Member>
+                               </Members>
+                       </xsl:for-each>
+               </xsl:when>
+               <xsl:otherwise>
+               <Members Name="{$declaringtype/@Name}" FullName="{$declaringtype/@FullName}">
+               
+               <xsl:copy-of select="$generictypereplacements"/>
+
+               <!-- Get all members in this type that are of listmembertype and are either
+                       protected or not protected according to showprotected. -->
+               <xsl:choose>
+                       <xsl:when test="$listmembertype = 'Explicit'">
+                               <xsl:for-each select="$declaringtype/Members/Member
+                                               [MemberType != 'Constructor']
+                                               [contains (@MemberName, '.')]">
+                                       <Member MemberName="{@MemberName}">
+                                               <xsl:attribute name="ExplicitMemberName">
+                                                       <xsl:call-template name="GetMemberName">
+                                                               <xsl:with-param name="type" select="@MemberName" />
+                                                               <xsl:with-param name="isproperty" select="$listmembertype = 'Property'"/>
+                                                       </xsl:call-template>
+                                               </xsl:attribute>
+                                               <xsl:attribute name="TypeParameters">
+                                                       <xsl:call-template name="GetTypeParameterNames">
+                                                               <xsl:with-param name="member" select="." />
+                                                       </xsl:call-template>
+                                               </xsl:attribute>
+                                               <xsl:attribute name="Parameters">
+                                                       <xsl:call-template name="GetParameterTypes">
+                                                               <xsl:with-param name="member" select="." />
+                                                       </xsl:call-template>
+                                               </xsl:attribute>
+                                               <xsl:copy-of select="./*" />
+                                       </Member>
+                               </xsl:for-each>
+                       </xsl:when>
+                       <xsl:otherwise>
+                               <xsl:for-each select="$declaringtype/Members/Member
+                                       [(MemberType=$listmembertype or ($listmembertype='Operator' and MemberType='Method'))]
+                                       [(not($overloads-mode) or @MemberName=$index or 
+                                               ($index='Conversion' and (@MemberName='op_Implicit' or @MemberName='op_Explicit'))) ]
+                                       [$showprotected=starts-with(MemberSignature[@Language='C#']/@Value, 'protected ')]
+                                       [($listmembertype='Method' and not(starts-with(@MemberName,'op_')))
+                                               or ($listmembertype='Operator' and starts-with(@MemberName,'op_'))
+                                               or (not($listmembertype='Method') and not($listmembertype='Operator'))]
+                                       [$showstatic or not(contains(MemberSignature[@Language='C#']/@Value,' static '))]
+                                       [$listmembertype = 'Constructor' or not(contains(@MemberName, '.'))]
+                                       ">
+                                       <Member MemberName="{@MemberName}">
+                                               <xsl:attribute name="ExplicitMemberName">
+                                                       <xsl:call-template name="GetMemberNameWithoutGenericTypes">
+                                                               <xsl:with-param name="m" select="@MemberName" />
+                                                       </xsl:call-template>
+                                               </xsl:attribute>
+                                               <xsl:attribute name="TypeParameters">
+                                                       <xsl:call-template name="GetTypeParameterNames">
+                                                               <xsl:with-param name="member" select="." />
+                                                       </xsl:call-template>
+                                               </xsl:attribute>
+                                               <xsl:attribute name="Parameters">
+                                                       <xsl:call-template name="GetParameterTypes">
+                                                               <xsl:with-param name="member" select="." />
+                                                       </xsl:call-template>
+                                               </xsl:attribute>
+                                               <xsl:copy-of select="./*" />
+                                       </Member>
+                               </xsl:for-each>
+                       </xsl:otherwise>
+               </xsl:choose>
+
+               <Docs>
+                       <xsl:copy-of select="$declaringtype/Docs/typeparam" />
+               </Docs>
+                       
+               </Members>
+               </xsl:otherwise>
+               </xsl:choose>
+
+               <xsl:if test="not($listmembertype='Constructor') and count($declaringtype/Base/BaseTypeName)=1">
+                       <xsl:variable name="basedocsfile">
+                               <xsl:call-template name="GetLinkTarget">
+                                       <xsl:with-param name="type">
+                                               <xsl:call-template name="GetEscapedTypeName">
+                                                       <xsl:with-param name="typename" select="$declaringtype/Base/BaseTypeName" />
+                                               </xsl:call-template>
+                                       </xsl:with-param>
+                                       <xsl:with-param name="cref">
+                                       </xsl:with-param>
+                                       <xsl:with-param name="local-suffix" />
+                                       <xsl:with-param name="remote"/>
+                                       <xsl:with-param name="xmltarget" select='1'/>
+                               </xsl:call-template>
+                       </xsl:variable>
+
+                       <xsl:if test="not(string($basedocsfile) = '')">
+                               <xsl:call-template name="GetInheritedMembers">
+                                       <xsl:with-param name="listmembertype" select="$listmembertype"/>
+                                       <xsl:with-param name="showprotected" select="$showprotected"/>
+                                       <xsl:with-param name="declaringtype" select="document(string($basedocsfile),.)/Type"/>
+                                       <xsl:with-param name="generictypereplacements" select="$declaringtype/Base/BaseTypeArguments/*"/>
+                                       <xsl:with-param name="showstatic" select='0'/>
+                               </xsl:call-template>
+                       </xsl:if>
+               </xsl:if>
+       </xsl:template>
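+
+       <!-- Informal sketch of the recursion above: each call emits one <Members>
+            node for $declaringtype, then resolves Base/BaseTypeName to that type's
+            own XML file via document() and repeats, passing showstatic=0 so static
+            members are listed only on the type itself, and BaseTypeArguments so a
+            base type's generic parameters can be mapped back to their instantiating
+            arguments in maketypelink. -->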
+
+       <xsl:template name="GetMemberNameWithoutGenericTypes">
+               <xsl:param name="m" />
+               <xsl:choose>
+                       <xsl:when test="contains ($m, '&lt;')">
+                               <xsl:value-of select="substring-before ($m, '&lt;')" />
+                       </xsl:when>
+                       <xsl:otherwise>
+                               <xsl:value-of select="$m" />
+                       </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
+       
+       <xsl:template name="GetTypeParameterNames">
+               <xsl:param name="member" />
+
+               <xsl:for-each select="$member/TypeParameters/TypeParameter">
+                       <xsl:if test="not(position()=1)">, </xsl:if>
+                       <xsl:value-of select="@Name" />
+               </xsl:for-each>
+       </xsl:template>
+       
+       <xsl:template name="GetParameterTypes">
+               <xsl:param name="member" />
+
+               <xsl:for-each select="$member/Parameters/Parameter">
+                       <xsl:if test="not(position()=1)">, </xsl:if>
+                       <xsl:value-of select="@Type" />
+               </xsl:for-each>
+       </xsl:template>
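+
+       <!-- Both helpers above flatten node lists into comma-separated strings;
+            ListMembers uses the resulting @TypeParameters and @Parameters attributes
+            as secondary sort keys so overloads are ordered deterministically. -->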
+       
+       <xsl:template name="ListAllMembers">
+               <xsl:param name="html-anchor" select="false()" />
+
+               <xsl:call-template name="ListMembers">
+                       <xsl:with-param name="listmembertype" select="'Constructor'"/>
+                       <xsl:with-param name="showprotected" select="false()"/>
+                       <xsl:with-param name="html-anchor" select="$html-anchor" />
+               </xsl:call-template>
+
+               <xsl:call-template name="ListMembers">
+                       <xsl:with-param name="listmembertype" select="'Constructor'"/>
+                       <xsl:with-param name="showprotected" select="true()"/>
+                       <xsl:with-param name="html-anchor" select="$html-anchor" />
+               </xsl:call-template>
+
+               <xsl:call-template name="ListMembers">
+                       <xsl:with-param name="listmembertype" select="'Field'"/>
+                       <xsl:with-param name="showprotected" select="false()"/>
+                       <xsl:with-param name="html-anchor" select="$html-anchor" />
+               </xsl:call-template>
+
+               <xsl:call-template name="ListMembers">
+                       <xsl:with-param name="listmembertype" select="'Field'"/>
+                       <xsl:with-param name="showprotected" select="true()"/>
+                       <xsl:with-param name="html-anchor" select="$html-anchor" />
+               </xsl:call-template>
+
+               <xsl:call-template name="ListMembers">
+                       <xsl:with-param name="listmembertype" select="'Property'"/>
+                       <xsl:with-param name="showprotected" select="false()"/>
+                       <xsl:with-param name="html-anchor" select="$html-anchor" />
+               </xsl:call-template>
+
+               <xsl:call-template name="ListMembers">
+                       <xsl:with-param name="listmembertype" select="'Property'"/>
+                       <xsl:with-param name="showprotected" select="true()"/>
+                       <xsl:with-param name="html-anchor" select="$html-anchor" />
+               </xsl:call-template>
+
+               <xsl:call-template name="ListMembers">
+                       <xsl:with-param name="listmembertype" select="'Method'"/>
+                       <xsl:with-param name="showprotected" select="false()"/>
+                       <xsl:with-param name="html-anchor" select="$html-anchor" />
+               </xsl:call-template>
+
+               <xsl:call-template name="ListMembers">
+                       <xsl:with-param name="listmembertype" select="'Method'"/>
+                       <xsl:with-param name="showprotected" select="true()"/>
+                       <xsl:with-param name="html-anchor" select="$html-anchor" />
+               </xsl:call-template>
+
+               <xsl:call-template name="ListMembers">
+                       <xsl:with-param name="listmembertype" select="'Event'"/>
+                       <xsl:with-param name="showprotected" select="false()"/>
+                       <xsl:with-param name="html-anchor" select="$html-anchor" />
+               </xsl:call-template>
+
+               <xsl:call-template name="ListMembers">
+                       <xsl:with-param name="listmembertype" select="'Event'"/>
+                       <xsl:with-param name="showprotected" select="true()"/>
+                       <xsl:with-param name="html-anchor" select="$html-anchor" />
+               </xsl:call-template>
+
+               <xsl:call-template name="ListMembers">
+                       <xsl:with-param name="listmembertype" select="'Operator'"/>
+                       <xsl:with-param name="showprotected" select="false()"/>
+                       <xsl:with-param name="html-anchor" select="$html-anchor" />
+               </xsl:call-template>
+
+               <xsl:call-template name="ListMembers">
+                       <xsl:with-param name="listmembertype" select="'Explicit'"/>
+                       <xsl:with-param name="showprotected" select="true()"/>
+                       <xsl:with-param name="html-anchor" select="$html-anchor" />
+               </xsl:call-template>
+
+               <xsl:call-template name="ListMembers">
+                       <xsl:with-param name="listmembertype" select="'ExtensionMethod'"/>
+                       <xsl:with-param name="showprotected" select="false()"/>
+                       <xsl:with-param name="html-anchor" select="$html-anchor" />
+               </xsl:call-template>
+       </xsl:template>
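+
+       <!-- The fixed call order above defines the section order on a type page:
+            constructors, fields, properties, methods and events (public block before
+            protected each time), then operators, explicitly implemented interface
+            members and extension methods. -->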
+
+       <!-- Lists the members in the current Type node.
+            Only lists members of type listmembertype.
+            showprotected = true() or false()
+       -->
+       <xsl:template name="ListMembers">
+               <xsl:param name="listmembertype"/>
+               <xsl:param name="showprotected"/>
+               <xsl:param name="overloads-mode" select="false()" />
+               <xsl:param name="html-anchor" select="false()" />
+
+               <!-- get name and namespace of current type -->
+               <xsl:variable name="TypeFullName" select="@FullName"/>
+               <xsl:variable name="TypeName" select="@Name"/>          
+               <xsl:variable name="TypeNamespace" select="substring-before(@FullName, concat('.',@Name))"/>
+               
+               <xsl:variable name="MEMBERS-rtf">
+                       <xsl:call-template name="GetInheritedMembers">
+                               <xsl:with-param name="listmembertype" select="$listmembertype"/>
+                               <xsl:with-param name="showprotected" select="$showprotected"/>
+                               <xsl:with-param name="declaringtype" select="."/>
+                               <xsl:with-param name="overloads-mode" select="$overloads-mode" />
+                       </xsl:call-template>
+               </xsl:variable>
+               <xsl:variable name="MEMBERS" select="msxsl:node-set($MEMBERS-rtf)" />
+               
+               <!--
+               <xsl:variable name="MEMBERS" select="
+                       $ALLMEMBERS/Member
+                       [(MemberType=$listmembertype or ($listmembertype='Operator' and MemberType='Method'))]
+                       [$showprotected=contains(MemberSignature[@Language='C#']/@Value,'protected')]
+                       [($listmembertype='Method' and not(starts-with(@MemberName,'op_')))
+                               or ($listmembertype='Operator' and starts-with(@MemberName,'op_'))
+                               or (not($listmembertype='Method') and not($listmembertype='Operator'))]
+                       "/>
+               -->
+               
+               <!-- if there aren't any, skip this -->
+               <xsl:if test="count($MEMBERS//Member)">
+
+               <xsl:variable name="SectionName">
+                       <xsl:if test="$listmembertype != 'Explicit' and $listmembertype != 'ExtensionMethod'">
+                               <xsl:if test="$showprotected">Protected </xsl:if>
+                               <xsl:if test="not($showprotected)">Public </xsl:if>
+                       </xsl:if>
+                       <xsl:call-template name="membertypeplural"><xsl:with-param name="name" select="$listmembertype"/></xsl:call-template>
+               </xsl:variable>
+
+               <!-- header -->
+               <xsl:call-template name="CreateH2Section">
+                       <xsl:with-param name="name" select="$SectionName" />
+                       <xsl:with-param name="child-id" select="$SectionName" />
+                       <xsl:with-param name="content">
+                               <div class="SubsectionBox">
+                               <xsl:call-template name="CreateMembersTable">
+                               <xsl:with-param name="content">
+
+                               <xsl:for-each select="$MEMBERS/Members/Member">
+                                       <!--<xsl:sort select="contains(MemberSignature[@Language='C#']/@Value,' static ')" data-type="text"/>-->
+                                       <xsl:sort select="@MemberName = 'op_Implicit' or @MemberName = 'op_Explicit'"/>
+                                       <xsl:sort select="@ExplicitMemberName" data-type="text"/>
+                                       <xsl:sort select="count(TypeParameters/TypeParameter)"/>
+                                       <xsl:sort select="@TypeParameters"/>
+                                       <xsl:sort select="count(Parameters/Parameter)"/>
+                                       <xsl:sort select="@Parameters"/>
+                                       
+                                       <xsl:variable name="local-id">
+                                               <xsl:choose>
+                                                       <xsl:when test="count(Link) = 1">
+                                                               <xsl:value-of select="Link/@Member" />
+                                                       </xsl:when>
+                                                       <xsl:otherwise>
+                                                               <xsl:call-template name="GetLinkId" >
+                                                                       <xsl:with-param name="type" select="parent::Members" />
+                                                                       <xsl:with-param name="member" select="." />
+                                                               </xsl:call-template>
+                                                       </xsl:otherwise>
+                                               </xsl:choose>
+                                       </xsl:variable>
+
+                                       <xsl:variable name="linkfile">
+                                               <xsl:if test="not(parent::Members/@FullName = $TypeFullName)">
+                                                       <xsl:call-template name="GetLinkTargetHtml">
+                                                               <xsl:with-param name="type">
+                                                                       <xsl:choose>
+                                                                               <xsl:when test="count(Link) = 1">
+                                                                                       <xsl:value-of select="Link/@Type"/>
+                                                                               </xsl:when>
+                                                                               <xsl:otherwise>
+                                                                                       <xsl:call-template name="GetEscapedTypeName">
+                                                                                               <xsl:with-param name="typename" select="parent::Members/@FullName" />
+                                                                                       </xsl:call-template>
+                                                                               </xsl:otherwise>
+                                                                       </xsl:choose>
+                                                               </xsl:with-param>
+                                                               <xsl:with-param name="cref" />
+                                                       </xsl:call-template>
+                                               </xsl:if>
+                                       </xsl:variable>
+
+                                       <xsl:variable name="linkid">
+                                               <xsl:if test="$html-anchor">
+                                                       <xsl:value-of select="$linkfile" />
+                                                       <xsl:text>#</xsl:text>
+                                               </xsl:if>
+                                               <xsl:value-of select="$local-id" />
+                                       </xsl:variable>
+                                       
+                                       <xsl:variable name="isinherited">
+                                               <xsl:if test="$listmembertype != 'ExtensionMethod' and not(parent::Members/@FullName = $TypeFullName)">
+                                                       <xsl:text> (</xsl:text>
+                                                       <i>
+                                                       <xsl:text>Inherited from </xsl:text>
+                                                       <xsl:call-template name="maketypelink">
+                                                               <xsl:with-param name="type" select="parent::Members/@FullName"/>
+                                                               <xsl:with-param name="wrt" select="$TypeNamespace"/>
+                                                       </xsl:call-template>
+                                                       <xsl:text>.</xsl:text>
+                                                       </i>
+                                                       <xsl:text>)</xsl:text>
+                                               </xsl:if>
+                                       </xsl:variable>
+
+                                       <tr valign="top">
+                                               <td>
+                                                       <!-- member annotations: accessor availability, default-property marker, modifiers -->
+
+                                                       <!-- check if it has get and set accessors -->
+                                                       <xsl:if test="MemberType='Property' and not(contains(MemberSignature[@Language='C#']/@Value, 'set;'))">
+                                                               <xsl:text>[read-only]</xsl:text>
+                                                       </xsl:if>
+                                                       <xsl:if test="MemberType='Property' and not(contains(MemberSignature[@Language='C#']/@Value, 'get;'))">
+                                                               <xsl:text>[write-only]</xsl:text>
+                                                       </xsl:if>
+
+                                                       <xsl:if test="contains(MemberSignature[@Language='C#']/@Value,'this[')">
+                                                               <div><i>default property</i></div>
+                                                       </xsl:if>
+
+                                                       <div>
+                                                       <xsl:call-template name="getmodifiers">
+                                                               <xsl:with-param name="sig" select="MemberSignature[@Language='C#']/@Value"/>
+                                                               <xsl:with-param name="protection" select="false()"/>
+                                                               <xsl:with-param name="inheritance" select="true()"/>
+                                                               <xsl:with-param name="extra" select="false()"/>
+                                                       </xsl:call-template>
+                                                       </div>
+                                               </td>
+
+                                       <xsl:choose>
+                                               <!-- constructor listing -->
+                                               <xsl:when test="MemberType='Constructor'">
+                                                       <!-- link to constructor page -->
+                                                       <td>
+                                                       <div>
+                                                       <b>
+                                                       <a href="{$linkid}">
+                                                               <xsl:call-template name="GetConstructorName">
+                                                                       <xsl:with-param name="type" select="parent::Members" />
+                                                                       <xsl:with-param name="ctor" select="." />
+                                                               </xsl:call-template>
+                                                       </a>
+                                                       </b>
+
+                                                       <!-- argument list -->
+                                                       <xsl:value-of select="'('"/>
+                                                               <xsl:for-each select="Parameters/Parameter">
+                                                                       <xsl:if test="not(position()=1)">, </xsl:if>
+                                                                       
+                                                                       <xsl:call-template name="ShowParameter">
+                                                                               <xsl:with-param name="Param" select="."/>
+                                                                               <xsl:with-param name="TypeNamespace" select="$TypeNamespace"/>
+                                                                               <xsl:with-param name="prototype" select="true()"/>
+                                                                       </xsl:call-template>
+                                                               </xsl:for-each>
+                                                       <xsl:value-of select="')'"/>
+                                                       </div>
+
+                                                       <!-- TODO: $implemented? -->
+
+                                                       </td>
+                                               </xsl:when>
+
+                                               <xsl:when test="$listmembertype = 'Explicit'">
+                                                       <td>
+                                                               <a href="{$linkid}">
+                                                                       <b>
+                                                                               <xsl:call-template name="GetMemberDisplayName">
+                                                                                       <xsl:with-param name="memberName" select="@MemberName" />
+                                                                                       <xsl:with-param name="isproperty" select="MemberType='Property'" />
+                                                                               </xsl:call-template>
+                                                                       </b>
+                                                               </a>
+                                                       </td>
+                                               </xsl:when>
+
+                                               <!-- field, property and event listing -->
+                                               <xsl:when test="MemberType='Field' or MemberType='Property' or MemberType='Event'">
+                                                       <td>
+
+                                                       <!-- link to member page -->
+                                                       <b>
+                                                       <a href="{$linkid}">
+                                                               <xsl:call-template name="GetMemberDisplayName">
+                                                                       <xsl:with-param name="memberName" select="@MemberName" />
+                                                                       <xsl:with-param name="isproperty" select="MemberType='Property'" />
+                                                               </xsl:call-template>
+                                                       </a>
+                                                       </b>
+
+                                                       <!-- argument list for accessors -->
+                                                       <xsl:if test="Parameters/Parameter">
+                                                       <xsl:value-of select="'('"/>
+                                                               <xsl:for-each select="Parameters/Parameter">
+                                                                       <xsl:if test="not(position()=1)">, </xsl:if>
+                                                                       
+                                                                       <xsl:call-template name="ShowParameter">
+                                                                               <xsl:with-param name="Param" select="."/>
+                                                                               <xsl:with-param name="TypeNamespace" select="$TypeNamespace"/>
+                                                                               <xsl:with-param name="prototype" select="true()"/>
+                                                                       </xsl:call-template>
+
+                                                               </xsl:for-each>
+                                                       <xsl:value-of select="')'"/>
+                                                       </xsl:if>
+
+                                                       </td>
+                                               </xsl:when>
+
+                                               <!-- method listing -->
+                                               <xsl:when test="$listmembertype='Method' or $listmembertype = 'ExtensionMethod'">
+                                                       <td colspan="2">
+
+                                                       <!-- link to method page -->
+                                                       <b>
+                                                       <a href="{$linkid}">
+                                                               <xsl:call-template name="GetMemberDisplayName">
+                                                                       <xsl:with-param name="memberName" select="@MemberName" />
+                                                                       <xsl:with-param name="isproperty" select="MemberType='Property'" />
+                                                               </xsl:call-template>
+                                                       </a>
+                                                       </b>
+
+                                                       <!-- argument list -->
+                                                       <xsl:value-of select="'('"/>
+                                                               <xsl:for-each select="Parameters/Parameter">
+                                                                       <xsl:if test="not(position()=1)">, </xsl:if>
+                                                                       
+                                                                       <xsl:call-template name="ShowParameter">
+                                                                               <xsl:with-param name="Param" select="."/>
+                                                                               <xsl:with-param name="TypeNamespace" select="$TypeNamespace"/>
+                                                                               <xsl:with-param name="prototype" select="true()"/>
+                                                                       </xsl:call-template>
+
+                                                               </xsl:for-each>
+                                                       <xsl:value-of select="')'"/>
+
+                                                       <!-- return type -->
+                                                       <xsl:if test="not(ReturnValue/ReturnType='System.Void')">
+                                                               <nobr>
+                                                               <xsl:text> : </xsl:text>
+                                                               <xsl:apply-templates select="ReturnValue/ReturnType" mode="typelink"><xsl:with-param name="wrt" select="$TypeNamespace"/></xsl:apply-templates>
+                                                               </nobr>
+                                                       </xsl:if>
+
+                                                       <blockquote>
+                                                               <xsl:apply-templates select="Docs/summary" mode="notoppara"/>
+                                                               <xsl:copy-of select="$isinherited"/>
+                                                       </blockquote>
+                                                       </td>
+                                               </xsl:when>
+
+                                               <xsl:when test="$listmembertype='Operator'">
+                                                       <td>
+
+                                                       <!-- link to operator page -->
+                                                       <xsl:choose>
+                                                       <xsl:when test="@MemberName='op_Implicit' or @MemberName='op_Explicit'">
+                                                               <b>
+                                                               <a href="{$linkid}">
+                                                                       <xsl:text>Conversion</xsl:text>
+                                                                       <xsl:choose>
+                                                                       <xsl:when test="ReturnValue/ReturnType = //Type/@FullName">
+                                                                               <xsl:text> from </xsl:text>
+                                                                               <xsl:value-of select="Parameters/Parameter/@Type"/>
+                                                                       </xsl:when>
+                                                                       <xsl:otherwise>
+                                                                               <xsl:text> to </xsl:text>
+                                                                               <xsl:value-of select="ReturnValue/ReturnType"/>
+                                                                       </xsl:otherwise>
+                                                                       </xsl:choose>
+                                                               </a>
+                                                               </b>
+
+                                                               <xsl:choose>
+                                                               <xsl:when test="@MemberName='op_Implicit'">
+                                                                       <xsl:text> (Implicit)</xsl:text>
+                                                               </xsl:when>
+                                                               <xsl:otherwise>
+                                                                       <xsl:text> (Explicit)</xsl:text>
+                                                               </xsl:otherwise>
+                                                               </xsl:choose>
+                                                       </xsl:when>
+                                                       <xsl:when test="count(Parameters/Parameter)=1">
+                                                               <b>
+                                                               <a href="{$linkid}">
+                                                                       <xsl:value-of select="substring-after(@MemberName, 'op_')"/>
+                                                               </a>
+                                                               </b>
+                                                       </xsl:when>
+                                                       <xsl:otherwise>
+                                                               <b>
+                                                               <a href="{$linkid}">
+                                                                       <xsl:value-of select="substring-after(@MemberName, 'op_')"/>
+                                                               </a>
+                                                               </b>
+                                                               <xsl:value-of select="'('"/>
+                                                                       <xsl:for-each select="Parameters/Parameter">
+                                                                               <xsl:if test="not(position()=1)">, </xsl:if>
+                                                                               
+                                                                               <xsl:call-template name="ShowParameter">
+                                                                                       <xsl:with-param name="Param" select="."/>
+                                                                                       <xsl:with-param name="TypeNamespace" select="$TypeNamespace"/>
+                                                                                       <xsl:with-param name="prototype" select="true()"/>
+                                                                               </xsl:call-template>
+                       
+                                                                       </xsl:for-each>
+                                                               <xsl:value-of select="')'"/>
+                                                       </xsl:otherwise>
+                                                       </xsl:choose>
+                                                       </td>
+                                               </xsl:when>
+                                               
+                                               <xsl:otherwise>
+                                                       <!-- Other types: just provide a link -->
+                                                       <td>
+                                                       <a href="{$linkid}">
+                                                               <xsl:call-template name="GetMemberDisplayName">
+                                                                       <xsl:with-param name="memberName" select="@MemberName" />
+                                                                       <xsl:with-param name="isproperty" select="MemberType='Property'" />
+                                                               </xsl:call-template>
+                                                       </a>
+                                                       </td>
+                                               </xsl:otherwise>
+                                       </xsl:choose>
+
+                                       <xsl:if test="$listmembertype != 'Method' and $listmembertype != 'ExtensionMethod'">
+                                               <td>
+                                                       <!-- description -->
+                                                       <xsl:if test="MemberType='Field' or MemberType = 'Property'">
+                                                               <i><xsl:apply-templates select="ReturnValue/ReturnType" mode="typelink"><xsl:with-param name="wrt" select="$TypeNamespace"/></xsl:apply-templates></i>
+                                                               <xsl:if test="MemberValue"> (<xsl:value-of select="MemberValue"/>)</xsl:if>
+                                                               <xsl:text>. </xsl:text>
+                                                       </xsl:if>
+
+                                                       <xsl:apply-templates select="Docs/summary" mode="notoppara"/>
+                                                       <xsl:copy-of select="$isinherited"/>
+                                               </td>
+                                       </xsl:if>
+                                       
+                                       </tr>
+                               </xsl:for-each>
+
+                               </xsl:with-param>
+                               </xsl:call-template>
+                               </div>
+                       </xsl:with-param>
+               </xsl:call-template>
+
+               </xsl:if>
+
+       </xsl:template>
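+
+       <!-- Illustrative only (not part of the original commit): each iteration
+            above emits one <tr> per member, with a modifier cell, a linked
+            name/signature cell and, for non-method lists, a summary cell.
+            A field row might render roughly as
+            <tr><td>static</td>
+                <td><b><a href="F:Example.Type.Empty">Empty</a></b></td>
+                <td><i>System.String</i>. Summary text.</td></tr> -->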
+
+       <xsl:template name="GetLinkName">
+               <xsl:param name="type"/>
+               <xsl:param name="member"/>
+               <xsl:call-template name="memberlinkprefix">
+                       <xsl:with-param name="member" select="$member"/>
+               </xsl:call-template>
+               <xsl:text>:</xsl:text>
+               <xsl:call-template name="GetEscapedTypeName">
+                       <xsl:with-param name="typename" select="$type/@FullName" />
+               </xsl:call-template>
+               <xsl:if test="$member/MemberType != 'Constructor'">
+                       <xsl:text>.</xsl:text>
+                       <xsl:variable name="memberName">
+                               <xsl:call-template name="GetGenericName">
+                                       <xsl:with-param name="membername" select="$member/@MemberName" />
+                                       <xsl:with-param name="member" select="$member" />
+                               </xsl:call-template>
+                       </xsl:variable>
+                       <xsl:call-template name="Replace">
+                               <xsl:with-param name="s">
+                                       <xsl:call-template name="ToBraces">
+                                               <xsl:with-param name="s" select="$memberName" />
+                                       </xsl:call-template>
+                               </xsl:with-param>
+                               <xsl:with-param name="from">.</xsl:with-param>
+                               <xsl:with-param name="to">#</xsl:with-param>
+                       </xsl:call-template>
+               </xsl:if>
+       </xsl:template>
+
+       <xsl:template name="GetGenericName">
+               <xsl:param name="membername" />
+               <xsl:param name="member" />
+               <xsl:variable name="numgenargs" select="count($member/Docs/typeparam)" />
+               <xsl:choose>
+                       <xsl:when test="$numgenargs = 0">
+                               <xsl:value-of select="$membername" />
+                       </xsl:when>
+                       <xsl:otherwise>
+                               <xsl:if test="contains($membername, '&lt;')">
+                                       <xsl:value-of select="substring-before ($membername, '&lt;')" />
+                               </xsl:if>
+                               <xsl:text>``</xsl:text>
+                               <xsl:value-of select="$numgenargs" />
+                       </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
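+
+       <!-- Hypothetical example: for a member whose Docs carry two <typeparam>
+            nodes and whose @MemberName is 'Convert<TInput,TOutput>', this
+            yields 'Convert``2', the arity-suffixed form used in member ids. -->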
+
+       <xsl:template name="GetEscapedTypeName">
+               <xsl:param name="typename" />
+               <xsl:variable name="base" select="substring-before ($typename, '&lt;')" />
+
+               <xsl:choose>
+                       <xsl:when test="$base != ''">
+                               <xsl:value-of select="translate ($base, '+', '.')" />
+                               <xsl:text>`</xsl:text>
+                               <xsl:call-template name="GetGenericArgumentCount">
+                                       <xsl:with-param name="arglist" select="substring-after ($typename, '&lt;')" />
+                                       <xsl:with-param name="count">1</xsl:with-param>
+                               </xsl:call-template>
+                       </xsl:when>
+                       <xsl:otherwise><xsl:value-of select="translate ($typename, '+', '.')" /></xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
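+
+       <!-- Assumed behaviour, for illustration: GetEscapedTypeName rewrites the
+            C#-style name 'System.Collections.Generic.Dictionary<TKey,TValue>'
+            as 'System.Collections.Generic.Dictionary`2', and turns nested-type
+            separators into dots, e.g. 'Outer+Inner' becomes 'Outer.Inner'. -->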
+
+       <xsl:template name="GetGenericArgumentCount">
+               <xsl:param name="arglist" />
+               <xsl:param name="count" />
+
+               <xsl:variable name="rest-rtf">
+                       <xsl:call-template name="SkipTypeArgument">
+                               <xsl:with-param name="s" select="$arglist" />
+                       </xsl:call-template>
+               </xsl:variable>
+               <xsl:variable name="rest" select="string($rest-rtf)" />
+
+               <xsl:choose>
+                       <xsl:when test="$arglist != '' and $rest = ''">
+                               <xsl:value-of select="$count" />
+                       </xsl:when>
+                       <xsl:when test="$arglist = '' and $rest = ''">
+                               <xsl:message terminate="yes">
+GetGenericArgumentCount: empty type argument list; arglist=<xsl:value-of select="$arglist" />; rest=<xsl:value-of select="$rest" />
+                               </xsl:message>
+                       </xsl:when>
+                       <xsl:when test="starts-with ($rest, '>')">
+                               <xsl:value-of select="$count" />
+                               <xsl:call-template name="GetEscapedTypeName">
+                                       <xsl:with-param name="typename" select="substring-after ($rest, '>')" />
+                               </xsl:call-template>
+                       </xsl:when>
+                       <xsl:when test="starts-with ($rest, ',')">
+                               <xsl:call-template name="GetGenericArgumentCount">
+                                       <xsl:with-param name="arglist" select="substring-after ($rest, ',')" />
+                                       <xsl:with-param name="count" select="$count+1" />
+                               </xsl:call-template>
+                       </xsl:when>
+                       <xsl:otherwise>
+                               <xsl:message terminate="yes">
+GetGenericArgumentCount: unexpected argument-list remainder; arglist=<xsl:value-of select="$arglist" />; rest=<xsl:value-of select="$rest" />
+                               </xsl:message>
+                       </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
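+
+       <!-- Worked example (illustrative): for arglist 'TKey,TValue>' the first
+            recursion consumes 'TKey' and continues with count 2 on 'TValue>';
+            the terminating '>' then emits '2', and any text remaining after it
+            (a nested-type suffix) is escaped again via GetEscapedTypeName. -->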
+
+       <xsl:template name="SkipTypeArgument">
+               <xsl:param name="s" />
+
+               <xsl:variable name="p-rtf">
+                       <xsl:call-template name="GetCLtGtPositions">
+                               <xsl:with-param name="s" select="$s" />
+                       </xsl:call-template>
+               </xsl:variable>
+               <xsl:variable name="p" select="msxsl:node-set($p-rtf)"/>
+
+               <xsl:choose>
+                       <!--
+                       Have to select between three `s' patterns:
+                       A,B>: need to return ",B>"
+                       Foo<A,B>>: need to forward to SkipGenericArgument to eventually return ">"
+                       Foo<A,B>+C>: need to forward to SkipGenericArgument to eventually return ">"
+                       -->
+                       <xsl:when test="starts-with ($s, '>')">
+                               <xsl:message terminate="yes">
+SkipTypeArgument: invalid type substring '<xsl:value-of select="$s" />'
+                               </xsl:message>
+                       </xsl:when>
+                       <xsl:when test="$p/Comma/@Length > 0 and 
+                                       ($p/Lt/@Length = 0 or $p/Comma/@Length &lt; $p/Lt/@Length) and 
+                                       ($p/Gt/@Length > 0 and $p/Comma/@Length &lt; $p/Gt/@Length)">
+                               <xsl:text>,</xsl:text>
+                               <xsl:value-of select="substring-after ($s, ',')" />
+                       </xsl:when>
+                       <xsl:when test="$p/Lt/@Length > 0 and $p/Lt/@Length &lt; $p/Gt/@Length">
+                               <xsl:variable name="r">
+                                       <xsl:call-template name="SkipGenericArgument">
+                                               <xsl:with-param name="s" select="substring-after ($s, '&lt;')" />
+                                       </xsl:call-template>
+                               </xsl:variable>
+                               <xsl:choose>
+                                       <xsl:when test="starts-with ($r, '>') or starts-with ($r, '+')">
+                                               <xsl:value-of select="substring-after ($r, '&gt;')" />
+                                       </xsl:when>
+                                       <xsl:when test="starts-with ($r, ',')">
+                                               <xsl:value-of select="$r" />
+                                       </xsl:when>
+                                       <xsl:otherwise>
+                                               <xsl:message>
+SkipTypeArgument: unexpected remainder; s=<xsl:value-of select="$s" />; r=<xsl:value-of select="$r" />
+                                               </xsl:message>
+                                       </xsl:otherwise>
+                               </xsl:choose>
+                       </xsl:when>
+                       <xsl:when test="$p/Gt/@Length > 0">
+                               <xsl:text>&gt;</xsl:text>
+                               <xsl:value-of select="substring-after ($s, '&gt;')" />
+                       </xsl:when>
+                       <xsl:otherwise><xsl:value-of select="$s" /></xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
+
+       <xsl:template name="GetCLtGtPositions">
+               <xsl:param name="s" />
+
+               <xsl:variable name="c"  select="substring-before ($s, ',')" />
+               <xsl:variable name="lt" select="substring-before ($s, '&lt;')" />
+               <xsl:variable name="gt" select="substring-before ($s, '&gt;')" />
+
+                       <Comma String="{$c}" Length="{string-length ($c)}" />
+                       <Lt String="{$lt}" Length="{string-length ($lt)}" />
+                       <Gt String="{$gt}" Length="{string-length ($gt)}" />
+       </xsl:template>
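+
+       <!-- Example (illustrative): for s = 'A,B>' this template emits
+            <Comma String="A" Length="1"/>, <Lt String="" Length="0"/> and
+            <Gt String="A,B" Length="3"/>; callers compare the Length values to
+            decide which of ',', '<' and '>' occurs first in the string. -->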
+
+       <!--
+       when given 'Foo<A,Bar<Baz<C,D,E>>>>', returns '>'
+       when given 'Bar<C>+Nested>', returns '>'
+       when given 'Foo<A,Bar<Baz<C,D,E>>>,', returns ','
+       (basically, it matches '<' to '>' and "skips" the intermediate type-name contents.)
+         -->
+       <xsl:template name="SkipGenericArgument">
+               <xsl:param name="s" />
+
+               <xsl:variable name="p-rtf">
+                       <xsl:call-template name="GetCLtGtPositions">
+                               <xsl:with-param name="s" select="$s" />
+                       </xsl:call-template>
+               </xsl:variable>
+               <xsl:variable name="p" select="msxsl:node-set($p-rtf)" />
+
+               <xsl:choose>
+                       <xsl:when test="starts-with ($s, '>')">
+                               <xsl:message terminate="yes">
+SkipGenericArgument: invalid type substring '<xsl:value-of select="$s" />'
+                               </xsl:message>
+                       </xsl:when>
+                       <xsl:when test="$p/Lt/@Length > 0 and $p/Lt/@Length &lt; $p/Gt/@Length">
+                               <!-- within 'Foo<A...'; look for matching '>' -->
+                               <xsl:variable name="r">
+                                       <xsl:call-template name="SkipGenericArgument">
+                                               <xsl:with-param name="s" select="substring-after ($s, '&lt;')" />
+                                       </xsl:call-template>
+                               </xsl:variable>
+                               <xsl:value-of select="substring-after ($r, '&gt;')" />
+                       </xsl:when>
+                       <xsl:when test="$p/Gt/@Length > 0">
+                               <!--<xsl:value-of select="substring ($s, string-length ($gt)+1)" />-->
+                               <xsl:value-of select="substring-after ($s, '&gt;')" />
+                       </xsl:when>
+                       <xsl:otherwise>
+                               <xsl:value-of select="$s" />
+                       </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
+
+       <xsl:template name="GetEscapedParameter">
+               <xsl:param name="orig-parameter-type" />
+               <xsl:param name="parameter-type" />
+               <xsl:param name="parameter-types" />
+               <xsl:param name="escape" />
+               <xsl:param name="index" />
+
+               <xsl:choose>
+                       <xsl:when test="$index &gt; count($parameter-types)">
+                               <xsl:if test="$parameter-type != $orig-parameter-type">
+                                       <xsl:value-of select="$parameter-type" />
+                               </xsl:if>
+                               <!-- ignore -->
+                       </xsl:when>
+                       <xsl:when test="$parameter-types[position() = $index]/@name = $parameter-type">
+                               <xsl:value-of select="concat ($escape, $index - 1)" />
+                       </xsl:when>
+                       <xsl:otherwise>
+                               <xsl:variable name="typeparam" select="$parameter-types[position() = $index]/@name" />
+                               <xsl:call-template name="GetEscapedParameter">
+                                       <xsl:with-param name="orig-parameter-type" select="$orig-parameter-type" />
+                                       <xsl:with-param name="parameter-type">
+                                               <xsl:call-template name="Replace">
+                                                       <xsl:with-param name="s">
+                                                               <xsl:call-template name="Replace">
+                                                                       <xsl:with-param name="s">
+                                                                               <xsl:call-template name="Replace">
+                                                                                       <xsl:with-param name="s">
+                                                                                               <xsl:call-template name="Replace">
+                                                                                                       <xsl:with-param name="s" select="$parameter-type"/>
+                                                                                                       <xsl:with-param name="from" select="concat('&lt;', $typeparam, '&gt;')" />
+                                                                                                       <xsl:with-param name="to" select="concat('&lt;', $escape, $index - 1, '&gt;')" />
+                                                                                               </xsl:call-template>
+                                                                                       </xsl:with-param>
+                                                                                       <xsl:with-param name="from" select="concat('&lt;', $typeparam, ',')" />
+                                                                                       <xsl:with-param name="to" select="concat('&lt;', $escape, $index - 1, ',')" />
+                                                                               </xsl:call-template>
+                                                                       </xsl:with-param>
+                                                                       <xsl:with-param name="from" select="concat (',', $typeparam, '&gt;')" />
+                                                                       <xsl:with-param name="to" select="concat(',', $escape, $index - 1, '&gt;')" />
+                                                               </xsl:call-template>
+                                                       </xsl:with-param>
+                                                       <xsl:with-param name="from" select="concat (',', $typeparam, ',')" />
+                                                       <xsl:with-param name="to" select="concat(',', $escape, $index - 1, ',')" />
+                                               </xsl:call-template>
+                                       </xsl:with-param>
+                                       <xsl:with-param name="parameter-types" select="$parameter-types" />
+                                       <xsl:with-param name="escape" select="$escape" />
+                                       <xsl:with-param name="index" select="$index + 1" />
+                               </xsl:call-template>
+                       </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
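+
+       <!-- Hypothetical trace: with parameter-type 'IEnumerable<T>', a single
+            method-level typeparam named 'T' and escape '``', the Replace chain
+            rewrites '<T>' to '<``0>', giving 'IEnumerable<``0>'; type-level
+            parameters go through the same logic with the '`' escape instead. -->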
+
+       <xsl:template name="GetLinkId">
+               <xsl:param name="type"/>
+               <xsl:param name="member"/>
+               <xsl:call-template name="GetLinkName">
+                       <xsl:with-param name="type" select="$type" />
+                       <xsl:with-param name="member" select="$member" />
+               </xsl:call-template>
+               <xsl:if test="count($member/Parameters/Parameter) &gt; 0">
+                       <xsl:text>(</xsl:text>
+                       <xsl:for-each select="$member/Parameters/Parameter">
+                               <xsl:if test="not(position()=1)">,</xsl:if>
+                               <xsl:call-template name="GetParameterType">
+                                       <xsl:with-param name="type" select="$type" />
+                                       <xsl:with-param name="member" select="$member" />
+                                       <xsl:with-param name="parameter" select="." />
+                               </xsl:call-template>
+                       </xsl:for-each>
+                       <xsl:text>)</xsl:text>
+               </xsl:if>
+               <xsl:if test="$member/@MemberName='op_Implicit' or $member/@MemberName='op_Explicit'">
+                       <xsl:text>~</xsl:text>
+                       <xsl:variable name="parameter-rtf">
+                               <Parameter Type="{$member/ReturnValue/ReturnType}" />
+                       </xsl:variable>
+                       <xsl:call-template name="GetParameterType">
+                               <xsl:with-param name="type" select="$type" />
+                               <xsl:with-param name="member" select="$member" />
+                               <xsl:with-param name="parameter" select="msxsl:node-set($parameter-rtf)/Parameter" />
+                       </xsl:call-template>
+               </xsl:if>
+       </xsl:template>
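+
+       <!-- Example ids this produces (assumed, following ECMA-style crefs):
+            'M:System.String.Concat(System.String,System.String)' for a method,
+            and 'M:System.Decimal.op_Implicit(System.Int32)~System.Decimal' for
+            a conversion operator, where '~' appends the return type. -->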
+
+       <!-- 
+         - what should be <xsl:value-of select="@Type" /> becomes a nightmare once
+         - generics enter the picture, since a parameter type could come from the
+         - type itself (becoming `N) or from the method (becoming ``N).
+         -->
+       <xsl:template name="GetParameterType">
+               <xsl:param name="type" />
+               <xsl:param name="member" />
+               <xsl:param name="parameter" />
+
+               <!-- the actual parameter type -->
+               <xsl:variable name="ptype">
+                       <xsl:choose>
+                               <xsl:when test="contains($parameter/@Type, '[')">
+                                       <xsl:value-of select="substring-before ($parameter/@Type, '[')" />
+                               </xsl:when>
+                               <xsl:when test="contains($parameter/@Type, '&amp;')">
+                                       <xsl:value-of select="substring-before ($parameter/@Type, '&amp;')" />
+                               </xsl:when>
+                               <xsl:when test="contains($parameter/@Type, '*')">
+                                       <xsl:value-of select="substring-before ($parameter/@Type, '*')" />
+                               </xsl:when>
+                               <xsl:otherwise>
+                                       <xsl:value-of select="$parameter/@Type" />
+                               </xsl:otherwise>
+                       </xsl:choose>
+               </xsl:variable>
+
+               <!-- parameter modifiers -->
+               <xsl:variable name="pmodifier">
+                       <xsl:call-template name="Replace">
+                               <xsl:with-param name="s" select="substring-after ($parameter/@Type, $ptype)" />
+                               <xsl:with-param name="from">&amp;</xsl:with-param>
+                               <xsl:with-param name="to">@</xsl:with-param>
+                       </xsl:call-template>
+               </xsl:variable>
+
+               <xsl:variable name="gen-type">
+                       <xsl:call-template name="GetEscapedParameter">
+                               <xsl:with-param name="orig-parameter-type" select="$ptype" />
+                               <xsl:with-param name="parameter-type">
+                                       <xsl:variable name="nested">
+                                               <xsl:call-template name="GetEscapedParameter">
+                                                       <xsl:with-param name="orig-parameter-type" select="$ptype" />
+                                                       <xsl:with-param name="parameter-type" select="$ptype" />
+                                                       <xsl:with-param name="parameter-types" select="$type/Docs/typeparam" />
+                                                       <xsl:with-param name="escape" select="'`'" />
+                                                       <xsl:with-param name="index" select="1" />
+                                               </xsl:call-template>
+                                       </xsl:variable>
+                                       <xsl:choose>
+                                               <xsl:when test="$nested != ''">
+                                                       <xsl:value-of select="$nested" />
+                                               </xsl:when>
+                                               <xsl:otherwise>
+                                                       <xsl:value-of select="$ptype" />
+                                               </xsl:otherwise>
+                                       </xsl:choose>
+                               </xsl:with-param>
+                               <xsl:with-param name="parameter-types" select="$member/Docs/typeparam" />
+                               <xsl:with-param name="escape" select="'``'" />
+                               <xsl:with-param name="index" select="1" />
+                       </xsl:call-template>
+               </xsl:variable>
+
+               <!-- the actual parameter type -->
+               <xsl:variable name="parameter-type">
+                       <xsl:choose>
+                               <xsl:when test="$gen-type != ''">
+                                       <xsl:value-of select="$gen-type" />
+                                       <xsl:value-of select="$pmodifier" />
+                               </xsl:when>
+                               <xsl:otherwise>
+                                       <xsl:value-of select="concat($ptype, $pmodifier)" />
+                               </xsl:otherwise>
+                       </xsl:choose>
+               </xsl:variable>
+
+               <!-- s/</{/g; s/>/}/g; so that less escaping is needed. -->
+               <xsl:call-template name="Replace">
+                       <xsl:with-param name="s">
+                               <xsl:call-template name="Replace">
+                                       <xsl:with-param name="s" select="translate ($parameter-type, '+', '.')" />
+                                       <xsl:with-param name="from">&gt;</xsl:with-param>
+                                       <xsl:with-param name="to">}</xsl:with-param>
+                               </xsl:call-template>
+                       </xsl:with-param>
+                       <xsl:with-param name="from">&lt;</xsl:with-param>
+                       <xsl:with-param name="to">{</xsl:with-param>
+               </xsl:call-template>
+       </xsl:template>
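+
+       <!-- Illustrative end-to-end result: a parameter declared as
+            'System.Collections.Generic.List<T>&', where T is a method type
+            parameter, comes out as 'System.Collections.Generic.List{``0}@':
+            '&' maps to '@' and angle brackets map to braces. -->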
+
+       <xsl:template name="Replace">
+               <xsl:param name="s" />
+               <xsl:param name="from" />
+               <xsl:param name="to" />
+               <xsl:choose>
+                       <xsl:when test="not(contains($s, $from))">
+                               <xsl:value-of select="$s" />
+                       </xsl:when>
+                       <xsl:otherwise>
+                               <xsl:variable name="prefix" select="substring-before($s, $from)"/>
+                               <xsl:variable name="suffix" select="substring-after($s, $from)" />
+                               <xsl:value-of select="$prefix" />
+                               <xsl:value-of select="$to" />
+                               <xsl:call-template name="Replace">
+                                       <xsl:with-param name="s" select="$suffix" />
+                                       <xsl:with-param name="from" select="$from" />
+                                       <xsl:with-param name="to" select="$to" />
+                               </xsl:call-template>
+                       </xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
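+
+       <!-- Usage sketch: Replace is a recursive substitute for the replace()
+            function that XSLT 1.0 lacks; for example s='a.b.c', from='.',
+            to='#' yields 'a#b#c'. -->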
+
+       <xsl:template name="getmodifiers">
+               <xsl:param name="sig"/>
+               <xsl:param name="protection" select="true()"/>
+               <xsl:param name="inheritance" select="true()"/>
+               <xsl:param name="extra" select="true()"/>
+               <xsl:param name="typetype" select="false()"/>
+
+               <xsl:variable name="Sig">
+                       <xsl:text> </xsl:text>
+                       <xsl:choose>
+                               <xsl:when test="contains($sig, '{')">
+                                       <xsl:value-of select="substring-before ($sig, '{')" />
+                               </xsl:when>
+                               <xsl:otherwise>
+                                       <xsl:value-of select="$sig" />
+                               </xsl:otherwise>
+                       </xsl:choose>
+                       <xsl:text> </xsl:text>
+               </xsl:variable>
+
+               <xsl:if test="$protection">
+                       <xsl:if test="contains($Sig, ' public ')">public </xsl:if>
+                       <xsl:if test="contains($Sig, ' private ')">private </xsl:if>
+                       <xsl:if test="contains($Sig, ' protected ')">protected </xsl:if>
+                       <xsl:if test="contains($Sig, ' internal ')">internal </xsl:if>
+               </xsl:if>
+
+               <xsl:if test="contains($Sig, ' static ')">static </xsl:if>
+               <xsl:if test="contains($Sig, ' abstract ')">abstract </xsl:if>
+               <xsl:if test="contains($Sig, ' operator ')">operator </xsl:if>
+
+               <xsl:if test="contains($Sig, ' const ')">const </xsl:if>
+               <xsl:if test="contains($Sig, ' readonly ')">readonly </xsl:if>
+
+               <xsl:if test="$inheritance">
+                       <xsl:if test="contains($Sig, ' override ')">override </xsl:if>
+                       <xsl:if test="contains($Sig, ' new ')">new </xsl:if>
+               </xsl:if>
+
+               <xsl:if test="$extra">
+                       <xsl:if test="contains($Sig, ' sealed ')">sealed </xsl:if>
+                       <xsl:if test="contains($Sig, ' virtual ')">virtual </xsl:if>
+
+                       <xsl:if test="contains($Sig, ' extern ')">extern </xsl:if>
+                       <xsl:if test="contains($Sig, ' checked ')">checked </xsl:if>
+                       <xsl:if test="contains($Sig, ' unsafe ')">unsafe </xsl:if>
+                       <xsl:if test="contains($Sig, ' volatile ')">volatile </xsl:if>
+                       <xsl:if test="contains($Sig, ' explicit ')">explicit </xsl:if>
+                       <xsl:if test="contains($Sig, ' implicit ')">implicit </xsl:if>
+               </xsl:if>
+
+               <xsl:if test="$typetype">
+                       <xsl:if test="contains($Sig, ' class ')">class </xsl:if>
+                       <xsl:if test="contains($Sig, ' interface ')">interface </xsl:if>
+                       <xsl:if test="contains($Sig, ' struct ')">struct </xsl:if>
+                       <xsl:if test="contains($Sig, ' delegate ')">delegate </xsl:if>
+                       <xsl:if test="contains($Sig, ' enum ')">enum </xsl:if>
+               </xsl:if>
+       </xsl:template>
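+
+       <!-- Example (hypothetical signature): for
+            sig = 'public static int Parse (string s)' with protection false(),
+            only 'static ' is emitted. Padding $Sig with spaces keeps contains()
+            from matching inside identifiers such as 'newItem'. -->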
+
+       <xsl:template name="GetTypeDescription">
+               <xsl:variable name="sig" select="TypeSignature[@Language='C#']/@Value"/>
+               <xsl:choose>
+                       <xsl:when test="contains($sig, ' class ')">Class</xsl:when>
+                       <xsl:when test="contains($sig, ' interface ')">Interface</xsl:when>
+                       <xsl:when test="contains($sig, ' struct ')">Struct</xsl:when>
+                       <xsl:when test="contains($sig, ' delegate ')">Delegate</xsl:when>
+                       <xsl:when test="contains($sig, ' enum ')">Enum</xsl:when>
+               </xsl:choose>
+       </xsl:template>
+       
+       <xsl:template match="since">
+               <p>
+                       <i>Note: This namespace, class, or member is supported only in version <xsl:value-of select="@version" />
+                       and later.</i>
+               </p>
+       </xsl:template>
+
+       <xsl:template name="GetLinkTargetHtml">
+               <xsl:param name="type" />
+               <xsl:param name="cref" />
+
+               <xsl:variable name="href">
+                       <xsl:call-template name="GetLinkTarget">
+                               <xsl:with-param name="type" select="$type" />
+                               <xsl:with-param name="cref" select="$cref" />
+                       </xsl:call-template>
+               </xsl:variable>
+               <xsl:choose>
+                       <xsl:when test="string($href) = ''">
+                               <xsl:text>javascript:alert("Documentation not found.")</xsl:text>
+                       </xsl:when>
+                       <xsl:otherwise><xsl:value-of select="$href" /></xsl:otherwise>
+               </xsl:choose>
+       </xsl:template>
+
+</xsl:stylesheet>
diff --git a/mcs/tools/monkeydoc/Resources/mdoc-sections-css.xsl b/mcs/tools/monkeydoc/Resources/mdoc-sections-css.xsl
new file mode 100644 (file)
index 0000000..ab31043
--- /dev/null
@@ -0,0 +1,131 @@
+<?xml version="1.0"?>
+
+<!--
+       mdoc-sections-css.xsl: Common CSS implementation of the functions
+                              required by mdoc-html-utils.xsl.
+
+       Including XSLT files need to provide the following functions:
+
+               - CreateExpandedToggle()
+
+       Author: Jonathan Pryor  <jpryor@novell.com>
+-->
+
+<xsl:stylesheet
+       version="1.0"
+       xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+       xmlns:monodoc="monodoc:///extensions"
+       exclude-result-prefixes="monodoc"
+       >
+
+       <xsl:template name="CreateH2Section">
+               <xsl:param name="name" />
+               <xsl:param name="id" select="''" />
+               <xsl:param name="child-id" select="generate-id (.)" />
+               <xsl:param name="content" />
+
+               <h2 class="Section">
+                       <xsl:if test="$id != ''">
+                               <xsl:attribute name="id">
+                                       <xsl:value-of select="$id" />
+                               </xsl:attribute>
+                       </xsl:if>
+                       <xsl:value-of select="$name" />
+               </h2>
+               <div class="SectionBox" id="{$child-id}">
+                       <xsl:copy-of select="$content" />
+               </div>
+       </xsl:template>
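+
+       <!-- For reference (assumed rendering): a call with name='Members' and
+            id='T:Example.Type:Members' produces roughly
+            <h2 class="Section" id="T:Example.Type:Members">Members</h2>
+            followed by a <div class="SectionBox"> wrapping the content; the
+            CSS variant relies on classes where mdoc-sections.xsl uses tables. -->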
+
+       <xsl:template name="CreateH3Section">
+               <xsl:param name="name" />
+               <xsl:param name="id" select="''" />
+               <xsl:param name="class" select="''" />
+               <xsl:param name="child-id" select="generate-id (.)" />
+               <xsl:param name="content" />
+
+               <h3>
+                       <xsl:if test="$class != ''">
+                               <xsl:attribute name="class">
+                                       <xsl:value-of select="$class" />
+                               </xsl:attribute>
+                       </xsl:if>
+                       <xsl:if test="$id != ''">
+                               <xsl:attribute name="id">
+                                       <xsl:value-of select="$id" />
+                               </xsl:attribute>
+                       </xsl:if>
+                       <xsl:value-of select="$name" />
+               </h3>
+               <blockquote id="{$child-id}">
+                       <xsl:copy-of select="$content" />
+               </blockquote>
+       </xsl:template>
+
+       <xsl:template name="CreateH4Section">
+               <xsl:param name="name" />
+               <xsl:param name="id" select="''" />
+               <xsl:param name="child-id" select="generate-id (.)" />
+               <xsl:param name="content" />
+
+               <h4 class="Subsection">
+                       <xsl:if test="$id != ''">
+                               <xsl:attribute name="id">
+                                       <xsl:value-of select="$id" />
+                               </xsl:attribute>
+                       </xsl:if>
+                       <xsl:value-of select="$name" />
+               </h4>
+               <blockquote class="SubsectionBox" id="{$child-id}">
+                       <xsl:copy-of select="$content" />
+               </blockquote>
+       </xsl:template>
+
+       <xsl:template name="CreateEnumerationTable">
+               <xsl:param name="content" />
+               <table class="Enumeration">
+                       <tr><th>Member Name</th><th>Description</th></tr>
+                       <xsl:copy-of select="$content" />
+               </table>
+       </xsl:template>
+
+       <xsl:template name="CreateHeader">
+               <xsl:param name="content" />
+               <xsl:copy-of select="$content" />
+       </xsl:template>
+
+       <xsl:template name="CreateListTable">
+               <xsl:param name="header" />
+               <xsl:param name="content" />
+               <table class="Documentation">
+                       <tr><xsl:copy-of select="$header" /></tr>
+                       <xsl:copy-of select="$content" />
+               </table>
+       </xsl:template>
+
+       <xsl:template name="CreateMembersTable">
+               <xsl:param name="content" />
+               <table class="TypeMembers">
+                       <xsl:copy-of select="$content" />
+               </table>
+       </xsl:template>
+
+       <xsl:template name="CreateSignature">
+               <xsl:param name="content" />
+               <h2>Syntax</h2>
+               <div class="Signature">
+                       <xsl:copy-of select="$content" />
+               </div>
+       </xsl:template>
+
+       <xsl:template name="CreateTypeDocumentationTable">
+               <xsl:param name="content" />
+               <table class="TypeDocumentation">
+               <tr><th>Type</th><th>Reason</th></tr>
+                       <xsl:copy-of select="$content" />
+               </table>
+       </xsl:template>
+
+</xsl:stylesheet>
+
diff --git a/mcs/tools/monkeydoc/Resources/mdoc-sections.xsl b/mcs/tools/monkeydoc/Resources/mdoc-sections.xsl
new file mode 100644 (file)
index 0000000..03d8383
--- /dev/null
@@ -0,0 +1,123 @@
+<?xml version="1.0"?>
+
+<!--
+	mdoc-sections.xsl: Common non-CSS (table-based) implementation of the
+	                   callable templates required by mdoc-html-utils.xsl.
+
+       Author: Jonathan Pryor  <jpryor@novell.com>
+
+-->
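+
+<!-- Illustrative note: mono-ecma.xsl (later in this commit) imports this
+     table-based implementation, while mono-ecma-css.xsl includes the
+     CSS-flavored mdoc-sections-css.xsl; both provide the same callable
+     templates expected by mdoc-html-utils.xsl. -->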
+
+<xsl:stylesheet
+       version="1.0"
+       xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+       xmlns:monodoc="monodoc:///extensions"
+       exclude-result-prefixes="monodoc"
+       >
+       <xsl:template name="CreateH2Section">
+               <xsl:param name="name" />
+               <xsl:param name="id" select="''" />
+               <xsl:param name="child-id" select="generate-id (.)" />
+               <xsl:param name="content" />
+
+               <h2 class="Section">
+                       <xsl:value-of select="$name" />
+               </h2>
+               <blockquote id="{$child-id}">
+                       <xsl:copy-of select="$content" />
+               </blockquote>
+       </xsl:template>
+
+       <xsl:template name="CreateH3Section">
+               <xsl:param name="name" />
+               <xsl:param name="id" select="''" />
+               <xsl:param name="class" select="''" />
+               <xsl:param name="child-id" select="generate-id (.)" />
+               <xsl:param name="content" />
+
+               <h3>
+                       <xsl:value-of select="$name" />
+               </h3>
+               <blockquote id="{$child-id}">
+                       <xsl:copy-of select="$content" />
+               </blockquote>
+       </xsl:template>
+
+       <xsl:template name="CreateH4Section">
+               <xsl:param name="name" />
+               <xsl:param name="id" select="''" />
+               <xsl:param name="child-id" select="generate-id (.)" />
+               <xsl:param name="content" />
+
+               <h4 class="Subsection">
+                       <xsl:value-of select="$name" />
+               </h4>
+               <blockquote class="SubsectionBox" id="{$child-id}">
+                       <xsl:copy-of select="$content" />
+               </blockquote>
+       </xsl:template>
+
+       <xsl:template name="CreateEnumerationTable">
+               <xsl:param name="content" />
+               <table class="EnumerationsTable" border="1" cellpadding="10" width="100%">
+                       <tr bgcolor="#f2f2f2">
+                               <th>Member Name</th>
+                               <th>Description</th>
+                       </tr>
+                       <xsl:copy-of select="$content" />
+               </table>
+       </xsl:template>
+
+       <xsl:template name="CreateHeader">
+               <xsl:param name="content" />
+               <table class="HeaderTable" width="100%" cellpadding="5">
+                       <tr bgcolor="#b0c4de">
+                               <td>
+                                       <xsl:copy-of select="$content" />
+                               </td>
+                       </tr>
+               </table>
+       </xsl:template>
+
+       <xsl:template name="CreateListTable">
+               <xsl:param name="header" />
+               <xsl:param name="content" />
+               <table border="1" cellpadding="3" width="100%">
+                       <tr bgcolor="#f2f2f2" valign="top">
+                               <xsl:copy-of select="$header" />
+                       </tr>
+                       <xsl:copy-of select="$content" />
+               </table>
+       </xsl:template>
+
+       <xsl:template name="CreateMembersTable">
+               <xsl:param name="content" />
+               <table border="1" cellpadding="6" width="100%">
+                       <xsl:copy-of select="$content" />
+               </table>
+       </xsl:template>
+
+       <xsl:template name="CreateSignature">
+               <xsl:param name="content" />
+               <table class="SignatureTable" bgcolor="#c0c0c0" cellspacing="0" width="100%">
+               <tr><td>
+                       <table class="InnerSignatureTable" cellpadding="10" cellspacing="0" width="100%">
+                       <tr bgcolor="#f2f2f2">
+                               <td>
+                               <xsl:copy-of select="$content" />
+                       </td></tr>
+                       </table>
+               </td></tr>
+               </table>
+               <br />
+       </xsl:template>
+       
+       <xsl:template name="CreateTypeDocumentationTable">
+               <xsl:param name="content" />
+               <table class="TypePermissionsTable" border="1" cellpadding="6" width="100%">
+               <tr bgcolor="#f2f2f2"><th>Type</th><th>Reason</th></tr>
+                       <xsl:copy-of select="$content" />
+               </table>
+       </xsl:template>
+
+</xsl:stylesheet>
diff --git a/mcs/tools/monkeydoc/Resources/mono-ecma-css.xsl b/mcs/tools/monkeydoc/Resources/mono-ecma-css.xsl
new file mode 100644 (file)
index 0000000..017699d
--- /dev/null
@@ -0,0 +1,47 @@
+<?xml version="1.0"?>
+
+<!--
+	mono-ecma-css.xsl: ECMA-style docs to HTML+CSS stylesheet transformation
+       based on mono-ecma.xsl by Joshua Tauberer
+
+       Author: Joshua Tauberer (tauberer@for.net)
+       Author: Mario Sopena Novales (mario.sopena@gmail.com)
+
+       TODO:
+               split this into multiple files
+-->
+
+<xsl:stylesheet
+       version="1.0"
+       xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+       xmlns:monodoc="monodoc:///extensions"
+       exclude-result-prefixes="monodoc"
+       >
+       <xsl:include href="mdoc-sections-css.xsl" />
+       <xsl:include href="mono-ecma-impl.xsl" />
+       
+       <xsl:output omit-xml-declaration="yes" />
+
+       <xsl:template name="CreateExpandedToggle">
+               <img src="xtree/images/clean/Lminus.gif" border="0" align="top"/>
+       </xsl:template>
+
+       <xsl:template name="CreateCodeBlock">
+               <xsl:param name="language" />
+               <xsl:param name="content" />
+
+               <div class="CodeExample">
+                       <p><b><xsl:value-of select="$language"/> Example</b></p>
+                       <div>
+                       <pre>
+                               <!--
+                               <xsl:value-of select="monodoc:Colorize($content, string($language))" 
+                                       disable-output-escaping="yes" />
+                                 -->
+                               <xsl:value-of select="$content" />
+                       </pre>
+                       </div>
+               </div>
+       </xsl:template>
+
+</xsl:stylesheet>
diff --git a/mcs/tools/monkeydoc/Resources/mono-ecma-impl.xsl b/mcs/tools/monkeydoc/Resources/mono-ecma-impl.xsl
new file mode 100644 (file)
index 0000000..61d827e
--- /dev/null
@@ -0,0 +1,540 @@
+<?xml version="1.0"?>
+
+<!--
+	mono-ecma-impl.xsl: ECMA-style docs to HTML stylesheet transformation
+
+       Author: Joshua Tauberer (tauberer@for.net)
+       Author: Jonathan Pryor (jpryor@novell.com)
+
+       This file requires that including files define the following callable
+       templates:
+               - CreateCodeBlock (language, content)
+               - CreateEnumerationTable (content)
+               - CreateHeader (content)
+               - CreateListTable (header, content)
+               - CreateMembersTable (content)
+               - CreateSignature (content)
+               - CreateTypeDocumentationTable (content)
+
+-->
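+
+<!-- Illustrative sketch (not part of this stylesheet): an including file
+     satisfies the contract above by defining each callable template, e.g.
+
+     <xsl:stylesheet version="1.0"
+                     xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+       <xsl:import href="mono-ecma-impl.xsl" />
+       <xsl:template name="CreateSignature">
+         <xsl:param name="content" />
+         <h2>Syntax</h2>
+         <div class="Signature"><xsl:copy-of select="$content" /></div>
+       </xsl:template>
+       (remaining templates omitted)
+     </xsl:stylesheet>
+
+     mono-ecma.xsl and mono-ecma-css.xsl in this commit are the two concrete
+     examples of this pattern. -->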
+
+<xsl:stylesheet
+       version="1.0"
+       xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+       xmlns:monodoc="monodoc:///extensions"
+       exclude-result-prefixes="monodoc"
+       >
+       <xsl:include href="mdoc-html-utils.xsl" />
+       
+       <!-- TEMPLATE PARAMETERS -->
+
+       <xsl:param name="show"/>
+       <xsl:param name="membertype"/>
+       <xsl:param name="namespace"/>
+
+       <!-- THE MAIN RENDERING TEMPLATE -->
+
+       <xsl:template match="Type|elements">
+               <!-- The namespace that the current type belongs to. -->
+               <xsl:variable name="TypeNamespace" select="substring(@FullName, 1, string-length(@FullName) - string-length(@Name) - 1)"/>
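+		<!-- e.g. @FullName='System.IO.Path' with @Name='Path' yields 'System.IO' -->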
+
+               <!-- HEADER -->
+
+               <xsl:variable name="typename" select="translate (@FullName, '+', '.')" />
+               <xsl:variable name="typelink">
+                       <xsl:call-template name="GetEscapedTypeName">
+                               <xsl:with-param name="typename" select="@FullName" />
+                       </xsl:call-template>
+               </xsl:variable>
+               <xsl:variable name="basename">
+                       <xsl:if test="Base/BaseTypeName">
+                               <xsl:value-of select="translate (Base/BaseTypeName, '+', '.')" />
+                       </xsl:if>
+               </xsl:variable>
+               <xsl:variable name="baselink">
+                       <xsl:if test="Base/BaseTypeName">
+                               <xsl:call-template name="GetEscapedTypeName">
+                                       <xsl:with-param name="typename" select="Base/BaseTypeName" />
+                               </xsl:call-template>
+                       </xsl:if>
+               </xsl:variable>
+
+               <xsl:call-template name="CreateHeader">
+                       <xsl:with-param name="content">
+                         <ul class="breadcrumb">
+                               <xsl:choose>
+                                       <xsl:when test="$show='masteroverview'">
+                                               <li class="namespace">
+                                                 <xsl:text>Namespaces in this Collection</xsl:text>
+                                               </li>
+                                       </xsl:when>
+                                       <xsl:when test="$show='typeoverview'">
+                                               <li class="namespace">
+                                               <a>
+                                                       <xsl:attribute name="href">N:<xsl:value-of select="$TypeNamespace"/></xsl:attribute>
+                                                       <xsl:value-of select="$TypeNamespace"/></a>
+                                               </li>
+                                               <li class="pubclass">
+                                                       <xsl:value-of select="@Name"/>
+                                               </li>
+                                       </xsl:when>
+                                       <xsl:when test="$show='members'">
+                                               <li class="namespace">
+                                                 <a>
+                                                   <xsl:attribute name="href">N:<xsl:value-of select="$TypeNamespace"/></xsl:attribute>
+                                                   <xsl:value-of select="$TypeNamespace"/>
+                                                 </a>
+                                               </li>
+                                               <li class="pubclass">
+                                                 <a>
+                                                       <xsl:attribute name="href">
+                                                               <xsl:text>T:</xsl:text>
+                                                               <xsl:value-of select="$typelink" />
+                                                       </xsl:attribute>                                                  
+                                                       <xsl:value-of select="@Name"/>
+                                                 </a>
+                                               </li>
+                                               <li class="members">
+                                                 Members
+                                               </li>
+                                       </xsl:when>
+                                       <xsl:when test="$show='member' or $show='overloads'">
+                                               <li class="namespace">
+                                               <a>
+                                                       <xsl:attribute name="href">N:<xsl:value-of select="$TypeNamespace"/></xsl:attribute>
+                                                       <xsl:value-of select="$TypeNamespace"/></a>
+                                               </li>
+                                               <li class="pubclass">
+                                                 <a>
+                                                       <xsl:attribute name="href">
+                                                               <xsl:text>T:</xsl:text>
+                                                               <xsl:value-of select="$typelink" />
+                                                       </xsl:attribute>                                                  
+                                                       <xsl:value-of select="@Name"/>
+                                                 </a>
+                                               </li>
+                                               <li class="pubproperty">
+                                                 <xsl:choose>
+                                                 <xsl:when test="$membertype='Operator'">
+                                                       <xsl:value-of select="$typename"/>
+                                                       <xsl:value-of select="' '"/> <!-- hard space -->
+                                                       <xsl:value-of select="substring-after(Members/Member[MemberType='Method'][position()=$index+1]/@MemberName, 'op_')"/>
+                                                 </xsl:when>
+                                                 <xsl:when test="$membertype='Constructor'">
+                                                       <xsl:value-of select="$typename"/>
+                                                 </xsl:when>
+                                                 <xsl:otherwise>
+                                                       <xsl:value-of select="Members/Member[MemberType=$membertype][position()=$index+1]/@MemberName"/>
+                                                 </xsl:otherwise>
+                                                 </xsl:choose>
+                                               </li>
+                                       </xsl:when>
+                                       <xsl:when test="$show='namespace'">
+                                               <li class="namespace">
+                                                 <xsl:value-of select="$namespace"/>
+                                               </li>
+                                       </xsl:when>
+                               </xsl:choose>
+                       </ul>
+                       <div class="named-header">
+                               <xsl:choose>
+                                       <xsl:when test="$show='masteroverview'">
+                                               <xsl:text>Master Overview</xsl:text>
+                                       </xsl:when>
+                                       <xsl:when test="$show='typeoverview'">
+                                               <xsl:value-of select="$typename"/>
+                                               <xsl:value-of select="' '"/>
+                                               <xsl:call-template name="gettypetype"/>
+                                       </xsl:when>
+                                       <xsl:when test="$show='members' and $membertype='All'">
+                                               <xsl:value-of select="$typename"/>
+                                               <xsl:text> Members</xsl:text>
+                                       </xsl:when>
+                                       <xsl:when test="$show='members'">
+                                               <xsl:value-of select="$typename"/>
+                                               <xsl:text>: </xsl:text>
+                                               <xsl:value-of select="$membertype"/>
+                                               <xsl:text> Members</xsl:text>
+                                       </xsl:when>
+                                       <xsl:when test="$show='member'">
+                                               <xsl:choose>
+                                               <xsl:when test="$membertype='Operator'">
+                                                       <xsl:value-of select="$typename"/>
+                                                       <xsl:value-of select="' '"/> <!-- hard space -->
+                                                       <xsl:value-of select="substring-after(Members/Member[MemberType='Method'][position()=$index+1]/@MemberName, 'op_')"/>
+                                               </xsl:when>
+                                               <xsl:when test="$membertype='Constructor'">
+                                                       <xsl:value-of select="$typename"/>
+                                               </xsl:when>
+                                               <xsl:otherwise>
+                                                       <xsl:value-of select="$typename"/>.<xsl:value-of select="Members/Member[MemberType=$membertype][position()=$index+1]/@MemberName"/>
+                                               </xsl:otherwise>
+                                               </xsl:choose>
+                                               <xsl:value-of select="' '"/>
+                                               <xsl:value-of select="$membertype"/>
+                                       </xsl:when>
+
+                                       <xsl:when test="$show='namespace'">
+                                               <xsl:value-of select="$namespace"/>
+                                               <xsl:text> Namespace</xsl:text>
+                                       </xsl:when>
+                                       
+                                       <xsl:when test="$show='overloads'">
+                                               <xsl:value-of select="$typename"/>.<xsl:value-of select="$index"/> Overloads
+                                       </xsl:when>
+
+                               </xsl:choose>
+                       </div>
+                       </xsl:with-param>
+               </xsl:call-template>
+
+               <!-- SELECT WHAT TYPE OF VIEW:
+                               typeoverview
+                               members
+                               member
+                               -->
+               <div class="Content">
+               <xsl:choose>
+               <xsl:when test="$show='masteroverview'">
+               
+                       <xsl:for-each select="namespace">
+                               <xsl:sort select="@ns"/>
+                               
+                               <!-- Don't display the namespace if it is a sub-namespace of another one.
+                                    But don't consider namespaces without periods, e.g. 'System', to be
+                                        parent namespaces, because then almost everything would get grouped under them. -->
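+				<!-- For example: 'Mono.Unix.Native' is suppressed when 'Mono.Unix'
+				     is also listed, while 'System.IO' still appears even though
+				     'System' exists, since 'System' contains no period. -->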
+                               <xsl:variable name="ns" select="@ns"/>
+                               <xsl:if test="count(parent::*/namespace[not(substring-before(@ns, '.')='') and starts-with($ns, concat(@ns, '.'))])=0">
+
+                               <p>
+                                       <b><a href="N:{@ns}"><xsl:value-of select="@ns"/></a></b>
+                               </p>
+                               <blockquote>
+                                       <div>
+                                       <xsl:apply-templates select="summary" mode="notoppara"/>
+                                       </div>
+                                       
+                                       <!-- Display the sub-namespaces of this namespace -->
+                                       <xsl:if test="not(substring-before($ns, '.')='')">
+                                       <xsl:for-each select="parent::*/namespace[starts-with(@ns, concat($ns, '.'))]">
+                                               <br/>
+                                               <div><a href="N:{@ns}"><xsl:value-of select="@ns"/></a></div>
+                                               <div><xsl:apply-templates select="summary" mode="notoppara"/></div>                                             
+                                       </xsl:for-each>
+                                       </xsl:if>
+                               </blockquote>
+                               
+                               </xsl:if>
+                       </xsl:for-each>
+                       
+               </xsl:when>
+               <!-- TYPE OVERVIEW -->
+               <xsl:when test="$show='typeoverview'">
+                       <xsl:variable name="implemented" select="monodoc:MonoImpInfo(string(AssemblyInfo/AssemblyName), string(@FullName), true())" />
+                       <xsl:call-template name="CreateTypeOverview">
+                               <xsl:with-param name="implemented" select="$implemented" />
+                               <xsl:with-param name="show-members-link" select="true()" />
+                       </xsl:call-template>
+                       
+
+                       <!-- signature -->
+                       <xsl:call-template name="CreateTypeSignature" />
+
+                       <xsl:call-template name="DisplayDocsInformation">
+                               <xsl:with-param name="linkid" select="concat ('T:', @FullName)" />
+                       </xsl:call-template>
+               </xsl:when>
+
+               <!-- MEMBER LISTING -->
+               <xsl:when test="$show='members'">
+                       <xsl:if test="$membertype='All'">
+                               <p>
+                                       The members of <xsl:value-of select="$typename"/> are listed below.
+                               </p>
+
+                               <xsl:if test="Base/BaseTypeName">
+                                       <p>
+                                               <xsl:text>See Also: </xsl:text>
+                                               <a>
+                                                       <xsl:attribute name="href">T:<xsl:value-of select="$baselink"/>/*</xsl:attribute>
+                                                       <xsl:text>Inherited members from </xsl:text>
+                                                       <xsl:value-of select="$basename"/>
+                                               </a>
+                                       </p>
+                               </xsl:if>
+
+                               <ul class="TypeMembersIndex">
+                                       <xsl:if test="count(Members/Member[MemberType='Constructor'])">
+                                               <li>
+                                                       <a><xsl:attribute name="href">T:<xsl:value-of select="$typelink"/>/C</xsl:attribute>Constructors</a>
+                                               </li>
+                                       </xsl:if>
+                                       <xsl:if test="count(Members/Member[MemberType='Field'])">
+                                               <li>
+                                                       <a><xsl:attribute name="href">T:<xsl:value-of select="$typelink"/>/F</xsl:attribute>Fields</a>
+                                               </li>
+                                       </xsl:if>
+                                       <xsl:if test="count(Members/Member[MemberType='Property'])">
+                                               <li>
+                                                       <a><xsl:attribute name="href">T:<xsl:value-of select="$typelink"/>/P</xsl:attribute>Properties</a>
+                                               </li>
+                                       </xsl:if>
+                                       <xsl:if test="count(Members/Member[MemberType='Method' and not(starts-with(@MemberName,'op_'))])">
+                                               <li>
+                                                       <a><xsl:attribute name="href">T:<xsl:value-of select="$typelink"/>/M</xsl:attribute>Methods</a>
+                                               </li>
+                                       </xsl:if>
+                                       <xsl:if test="count(Members/Member[MemberType='Event'])">
+                                               <li>
+                                                       <a><xsl:attribute name="href">T:<xsl:value-of select="$typelink"/>/E</xsl:attribute>Events</a>
+                                               </li>
+                                       </xsl:if>
+                                       <xsl:if test="count(Members/Member[MemberType='Method' and starts-with(@MemberName,'op_')])">
+                                               <li>
+                                                       <a><xsl:attribute name="href">T:<xsl:value-of select="$typelink"/>/O</xsl:attribute>Operators</a>
+                                               </li>
+                                       </xsl:if>
+                                       <xsl:if test="count(Members/Member[MemberType='ExtensionMethod'])">
+                                               <li>
+                                                       <a><xsl:attribute name="href">T:<xsl:value-of select="$typelink"/>/X</xsl:attribute>Extension Methods</a>
+                                               </li>
+                                       </xsl:if>
+                               </ul>
+
+                               <!-- list each type of member (public, then protected) -->
+
+                               <xsl:call-template name="ListAllMembers" />
+                       </xsl:if>
+
+                       <xsl:if test="not($membertype='All')">
+                               <!-- list the members of this type (public, then protected) -->
+
+                               <p>
+                                       The
+                                       <xsl:call-template name="membertypeplurallc"><xsl:with-param name="name" select="$membertype"/></xsl:call-template>
+                                       of <xsl:value-of select="$typename"/> are listed below.  For a list of all members, see the <a>
+                                       <xsl:attribute name="href">T:<xsl:value-of select="$typelink"/>/*</xsl:attribute>
+                                       <xsl:value-of select="@Name"/> Members</a> list.
+                               </p>
+                               
+                               <xsl:if test="Base/BaseTypeName">
+                                       <p>
+                                               <xsl:text>See Also: </xsl:text>
+                                               <a>
+                                                       <xsl:attribute name="href">T:<xsl:value-of select="$baselink"/>/*</xsl:attribute>
+                                                       <xsl:text>Inherited members from </xsl:text>
+                                                       <xsl:value-of select="$basename"/>
+                                               </a>
+                                       </p>
+                               </xsl:if>
+
+                               <xsl:call-template name="ListMembers">
+                                       <xsl:with-param name="listmembertype" select="$membertype"/>
+                                       <xsl:with-param name="showprotected" select="false()"/>
+                               </xsl:call-template>
+
+                               <xsl:call-template name="ListMembers">
+                                       <xsl:with-param name="listmembertype" select="$membertype"/>
+                                       <xsl:with-param name="showprotected" select="true()"/>
+                               </xsl:call-template>
+                       </xsl:if>
+
+               </xsl:when>
+               
+               <xsl:when test="$show='overloads'">
+                               <p>
+                                       The overloads of <xsl:value-of select="$index"/>
+                                       are listed below.  For a list of all members, see the <a>
+                                       <xsl:attribute name="href">T:<xsl:value-of select="$typelink"/>/*</xsl:attribute>
+                                       <xsl:value-of select="@Name"/> Members</a> list.
+                               </p>
+                               
+                               <!-- TODO: can we make this actually test if there are any overloads
+                               <xsl:if test="Base/BaseTypeName">
+                                       <p>
+                                               See Also: <a>
+                                       <xsl:attribute name="href">T:<xsl:value-of select="Base/BaseTypeName"/>/*</xsl:attribute>
+                                       Inherited members</a> from <xsl:value-of select="Base/BaseTypeName"/>
+                                       </p>
+                               </xsl:if>
+                                -->
+                                
+                               <xsl:call-template name="ListMembers">
+                                       <xsl:with-param name="listmembertype" select="$membertype"/>
+                                       <xsl:with-param name="showprotected" select="false()"/>
+                                       <xsl:with-param name="overloads-mode" select="true()"/>
+                               </xsl:call-template>
+
+                               <xsl:call-template name="ListMembers">
+                                       <xsl:with-param name="listmembertype" select="$membertype"/>
+                                       <xsl:with-param name="showprotected" select="true()"/>
+                                       <xsl:with-param name="overloads-mode" select="true()"/>
+                               </xsl:call-template>
+               </xsl:when>
+               <!-- MEMBER DETAILS -->
+               <xsl:when test="$show='member'">
+                       <xsl:variable name="Type" select="."/>
+
+                       <!-- select the member; this for-each visits only the single member being displayed -->
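+                       <!-- note: XPath position() is one-based, hence the +1 on the (apparently zero-based) $index -->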
+                       <xsl:for-each select="Members/Member[MemberType=$membertype or ($membertype='Operator' and MemberType='Method')][position()=$index+1]">
+
+                               <!-- summary -->
+                               
+                               <xsl:call-template name="CreateMemberOverview">
+                                       <xsl:with-param name="implemented" select="monodoc:MonoImpInfo(string(AssemblyInfo/AssemblyName), string(@FullName), true())" />
+                               </xsl:call-template>
+
+                               <xsl:call-template name="CreateMemberSignature">
+                                       <xsl:with-param name="linkid" select="concat ('T:', @FullName)" />
+                               </xsl:call-template>
+
+                               <div class="MemberBox">
+                                       <xsl:call-template name="DisplayDocsInformation">
+                                               <xsl:with-param name="linkid" select="concat ('T:', @FullName)" />
+                                       </xsl:call-template>
+                               </div>
+
+                       </xsl:for-each>
+
+               </xsl:when>
+
+               <!-- NAMESPACE SUMMARY -->
+               <xsl:when test="$show='namespace'">
+
+                       <!-- summary -->
+
+                       <p>
+                               <xsl:apply-templates select="summary" mode="notoppara"/>
+                               <xsl:if test="monodoc:MonoEditing()">
+                                       <xsl:value-of select="' '" />
+                                       [<a href="{monodoc:EditUrlNamespace (., $namespace, 'summary')}">Edit</a>]
+                               </xsl:if>
+                       </p>
+
+                       <!-- remarks -->
+
+                       <xsl:if test="not(remarks = '')">
+                               <h2>Remarks</h2>
+                               <div class="SectionBox">
+                                       <xsl:apply-templates select="remarks"/>
+                                       <xsl:if test="monodoc:MonoEditing()">
+                                               <xsl:value-of select="' '" />
+                                               [<a href="{monodoc:EditUrlNamespace (., $namespace, 'remarks')}">Edit</a>]
+                                       </xsl:if>
+                               </div>
+                       </xsl:if>
+               
+                       <xsl:call-template name="namespacetypes">
+                               <xsl:with-param name="typetype" select="'class'"/>
+                               <xsl:with-param name="typetitle" select="'Classes'"/>
+                       </xsl:call-template>
+
+                       <xsl:call-template name="namespacetypes">
+                               <xsl:with-param name="typetype" select="'interface'"/>
+                               <xsl:with-param name="typetitle" select="'Interfaces'"/>
+                       </xsl:call-template>
+
+                       <xsl:call-template name="namespacetypes">
+                               <xsl:with-param name="typetype" select="'struct'"/>
+                               <xsl:with-param name="typetitle" select="'Structs'"/>
+                       </xsl:call-template>
+
+                       <xsl:call-template name="namespacetypes">
+                               <xsl:with-param name="typetype" select="'delegate'"/>
+                               <xsl:with-param name="typetitle" select="'Delegates'"/>
+                       </xsl:call-template>
+
+                       <xsl:call-template name="namespacetypes">
+                               <xsl:with-param name="typetype" select="'enum'"/>
+                               <xsl:with-param name="typetitle" select="'Enumerations'"/>
+                       </xsl:call-template>
+
+                       
+               </xsl:when>
+
+               <!-- don't know what kind of page this is -->
+               <xsl:otherwise>
+                       Don't know what to do!
+               </xsl:otherwise>
+
+               </xsl:choose>
+               </div>
+               
+               <!-- FOOTER -->
+               
+               <div class="Footer">
+               </div>
+
+       </xsl:template>
+
+       <xsl:template name="GetLinkTarget">
+               <xsl:param name="type" />
+               <xsl:param name="cref" />
+
+               <xsl:value-of select="$cref" />
+       </xsl:template>
+
+       <xsl:template name="namespacetypes">
+               <xsl:param name="typetype"/>
+               <xsl:param name="typetitle"/>
+
+               <xsl:variable name="NODES" select="*[name()=$typetype]"/>
+
+               <xsl:if test="count($NODES)">
+
+               <xsl:call-template name="CreateH2Section">
+                       <xsl:with-param name="name" select="$typetitle" />
+                       <xsl:with-param name="child-id" select="$typetitle" />
+                       <xsl:with-param name="content">
+               
+               <xsl:call-template name="CreateTypeDocumentationTable">
+               <xsl:with-param name="content">
+                       <xsl:for-each select="$NODES">
+                               <xsl:sort select="@name"/>
+
+                               <tr>
+                                       <td>
+                                               <a>
+                                                       <xsl:attribute name="href">
+                                                               <xsl:text>T:</xsl:text>
+                                                               <xsl:call-template name="GetEscapedTypeName">
+                                                                       <xsl:with-param name="typename" select="@fullname" />
+                                                               </xsl:call-template>
+                                                       </xsl:attribute>
+                                                       <xsl:value-of select="@name"/>
+                                               </a>
+
+                                               <xsl:variable name="containingtype" select="substring-before(@fullname, concat('+',@name))"/>
+                                               <xsl:if test="$containingtype">
+                                               <br/>(in
+                                                       <xsl:call-template name="maketypelink">
+                                                               <xsl:with-param name="type" select="$containingtype"/>
+                                                               <xsl:with-param name="wrt" select="$namespace"/>
+                                                       </xsl:call-template>)
+                                               </xsl:if>
+                                       </td>
+                                       <td>
+                                               <xsl:apply-templates select="summary" mode="notoppara"/>
+
+                                               <xsl:variable name="MonoImplInfo" select="monodoc:MonoImpInfo(string(@assembly), string(@fullname), false())"/>
+                                               <xsl:if test="$MonoImplInfo"><br/><b><xsl:value-of disable-output-escaping="yes" select="$MonoImplInfo"/></b></xsl:if>
+                                       </td>
+                               </tr>
+                       </xsl:for-each>
+               </xsl:with-param>
+               </xsl:call-template>
+                       </xsl:with-param>
+               </xsl:call-template>
+
+               </xsl:if>
+       </xsl:template>
+       
+       <xsl:template name="CreateEditLink">
+               <xsl:param name="e" />
+               <xsl:if test="monodoc:MonoEditing()">
+                       <xsl:value-of select="' '" />
+                       [<a href="{monodoc:EditUrl ($e)}">Edit</a>]
+               </xsl:if>
+       </xsl:template>
+
+</xsl:stylesheet>
diff --git a/mcs/tools/monkeydoc/Resources/mono-ecma.css b/mcs/tools/monkeydoc/Resources/mono-ecma.css
new file mode 100644 (file)
index 0000000..2ffa1d2
--- /dev/null
@@ -0,0 +1,596 @@
+body
+{
+    font-family: "Lucida Grande", Geneva, Helvetica, Arial, Verdana, Sans-Serif;
+    margin: 0;
+    padding: 0;
+    color: #333333;
+}
+
+a:link
+{
+    color: #034af3;
+    text-decoration: underline;
+}
+a:visited
+{
+    color: #505abc;
+}
+a:hover
+{
+    color: #1d60ff;
+    text-decoration: none;
+}
+a:active
+{
+    color: #12eb87;
+}
+
+pre
+{
+    font-family: Consolas, "Courier New", Monospace;
+    border: 1px solid #CCCCCC;
+    background-color: #F7F7F7;
+    padding: 7px;
+    margin: 0 20px 0 20px;
+    line-height: 1.3em;
+    -moz-border-radius: 3px;
+    -webkit-border-radius: 3px;
+    border-radius: 3px;
+}
+
+img
+{
+    border: 0px;
+}
+
+/* HEADINGS   
+----------------------------------------------------------*/
+h1, h2, h3, h4, h5, h6
+{
+    color: #000;
+    font-family: Arial, Helvetica, sans-serif;
+}
+
+h1
+{
+    font-size: 16pt;
+    padding-bottom: 0;
+    margin-bottom: 0;
+}
+h2
+{
+    font-size: 14pt;
+    padding: 0 0 1px 0;
+    border-bottom: 1px solid #DDDDDD;
+    margin-top: 20px;
+}
+h3
+{
+    font-size: 12pt;
+    margin-top: 20px;
+    margin-bottom: 5px;
+}
+h4
+{
+    font-size: 11pt;
+}
+h5, h6
+{
+    font-size: 10pt;
+}
+
+/* this rule styles <h2> tags that are the 
+first child of the left and right table columns */
+.rightColumn > h1, .rightColumn > h2, .leftColumn > h1, .leftColumn > h2
+{
+    margin-top: 0;
+}
+
+/* PRIMARY LAYOUT ELEMENTS   
+----------------------------------------------------------*/
+
+/* you can specify a greater or lesser percentage for the 
+page width. Or, you can specify an exact pixel width. */
+.page
+{
+    padding: 0;
+    margin: 0;
+}
+
+#header
+{
+    position: relative;
+    margin-bottom: 0px;
+    color: #000;
+    padding: 0 0 0 15px;
+    background: url('mdocimages/headerbg.png');
+    background-position: right;
+    background-repeat: no-repeat;
+    background-color: #679EF1;
+    height: 40px;
+    border-bottom: 1px solid #98C2F7;
+    border-bottom: 1px dotted #3363BD;
+}
+
+#header h1
+{
+    font-weight: bold;
+    padding: 0;
+    margin: 0;
+    color: #fff;
+    border: none;
+    line-height: 1.8em;
+    font-family: Arial, Helvetica, sans-serif;
+    font-size: 22px !important;
+}
+
+#main
+{
+    padding: 0px 0px 15px 0px;
+    background-color: #fff;
+    margin-bottom: 30px;
+    _height: 1px; /* only IE6 applies CSS properties starting with an underscore */
+}
+
+#footer
+{
+    color: #999;
+    padding: 10px 0;
+    text-align: center;
+    line-height: normal;
+    margin: 0;
+    font-size: 8pt;
+}
+
+#line-background
+{
+    background-image: url('mdocimages/treebg.png');
+    background-repeat: repeat-y;
+    height: 100%;
+}
+
+#left-content
+{
+    float: left;
+    width: 186px;
+    padding-top: 5px;
+    margin-right: 5px;
+    overflow: hidden;
+}
+
+#right-content
+{
+    padding-top: 0px;
+    overflow: auto;
+    height: 100%;
+}
+
+.right-content-pad
+{
+    margin: 6px 10px 0px 10px;
+}
+
+.named-header
+{
+    background: url('mdocimages/hatch.png') repeat-x left bottom;
+    height: 48px;
+    background-color: #FAFBFD;
+    font-size: 16pt;
+    font-weight: bold;
+    padding: 8px 0 0 10px;
+    font-family: 'Segoe UI',Verdana,Arial, sans-serif;
+}
+
+.member-list
+{
+    border-spacing: 0px;
+}
+
+.member-list td
+{
+    padding: 4px;
+    margin: 0px;
+    border-bottom: 1px dotted #CCCCCC;
+}
+
+/* TAB MENU   
+----------------------------------------------------------*/
+ul#menu
+{
+    border-bottom: 1px #5C87B2 solid;
+    padding: 0 0 2px;
+    position: relative;
+    margin: 0;
+    text-align: right;
+}
+
+ul#menu li
+{
+    display: inline;
+    list-style: none;
+}
+
+ul#menu li#greeting
+{
+    padding: 10px 20px;
+    font-weight: bold;
+    text-decoration: none;
+    line-height: 2.8em;
+    color: #fff;
+}
+
+ul#menu li a
+{
+    padding: 10px 20px;
+    font-weight: bold;
+    text-decoration: none;
+    line-height: 2.8em;
+    background-color: #e8eef4;
+    color: #034af3;
+}
+
+ul#menu li a:hover
+{
+    background-color: #fff;
+    text-decoration: none;
+}
+
+ul#menu li a:active
+{
+    background-color: #a6e2a6;
+    text-decoration: none;
+}
+
+ul#menu li.selected a
+{
+    background-color: #fff;
+    color: #000;
+}
+
+/* FORM LAYOUT ELEMENTS   
+----------------------------------------------------------*/
+
+fieldset 
+{
+    margin: 1em 0;
+    padding: 1em;
+    border: 1px solid #CCC;
+}
+
+fieldset p 
+{
+    margin: 2px 12px 10px 10px;
+}
+
+legend 
+{
+    font-size: 11pt;
+    font-weight: 600;
+    padding: 2px 4px 8px 4px;
+}
+
+input[type="text"] 
+{
+    width: 200px;
+    border: 1px solid #CCC;
+}
+
+input[type="password"] 
+{
+    width: 200px;
+    border: 1px solid #CCC;
+}
+
+/* TABLE
+----------------------------------------------------------*/
+
+table 
+{
+/*  border: solid 1px #e8eef4;
+  border-collapse: collapse;*/
+}
+
+table td 
+{
+  padding: 5px;   
+/*  border: solid 1px #e8eef4;*/
+}
+
+table th
+{
+  padding: 6px 5px;
+  text-align: left;
+  background-color: #e8eef4; 
+  border: solid 1px #e8eef4;   
+}
+
+/* MISC  
+----------------------------------------------------------*/
+.clear
+{
+    clear: both;
+}
+
+.error
+{
+    color:Red;
+}
+
+.indent
+{
+    margin-left: 20px;
+    margin-right: 20px;
+}
+
+#menucontainer
+{
+    margin-top:40px;
+}
+
+div#title
+{
+    display:block;
+    float:left;
+    text-align:left;
+}
+
+#logindisplay
+{
+    font-size:11pt;
+    display:block;
+    text-align:right;
+    margin:0px;
+    color:White;
+}
+
+#logindisplay a:link
+{
+    color: white;
+    text-decoration: underline;
+}
+
+#logindisplay a:visited
+{
+    color: white;
+    text-decoration: underline;
+}
+
+#logindisplay a:hover
+{
+    color: white;
+    text-decoration: none;
+}
+
+/* Styles for validation helpers
+-----------------------------------------------------------*/
+.field-validation-error
+{
+    color: #ff0000;
+}
+
+.field-validation-valid
+{
+    display: none;
+}
+
+.input-validation-error
+{
+    border: 1px solid #ff0000;
+    background-color: #ffeeee;
+}
+
+.validation-summary-errors
+{
+    font-weight: bold;
+    color: #ff0000;
+}
+
+.validation-summary-valid
+{
+    display: none;
+}
+
+/* Styles for editor and display helpers
+----------------------------------------------------------*/
+.display-label,
+.editor-label,
+.display-field,
+.editor-field
+{
+    margin: 0.5em 0;
+}
+
+.text-box
+{
+    width: 30em;
+}
+
+.text-box.multi-line
+{
+    height: 6.5em;
+}
+
+.tri-state
+{
+    width: 6em;
+}
+
+/* Breadcrumb Bar */
+.breadcrumb
+{
+    border-left: 1px solid #cacaca;
+    border-right: 1px solid #cacaca;
+    border-bottom: 1px solid #cacaca;
+    background-image: url('mdocimages/bc_bg.png');
+    background-repeat: repeat-x;
+    height: 25px;
+    line-height: 25px;
+    color: #454545;
+    border-top: 0px;
+    width: 100%;
+    overflow: hidden;
+    margin-left: -2px;
+    padding: 0px;
+    font-style: normal;
+    font-variant: normal;
+    font-weight: normal;
+    font-size: 11px;
+    font-family: Arial, Helvetica, sans-serif;
+    margin-right: 0px;
+    margin-top: 0px;
+    margin-bottom: 0px;
+}
+
+.breadcrumb li
+{
+    list-style-type: none;
+    float: left;
+    padding-left: 25px;
+    background-position: 5px center;
+    background-repeat: no-repeat;
+}
+
+.breadcrumb li.pubclass { background-image: url('mdocimages/pubclass.png'); }
+.breadcrumb li.pubdelegate { background-image: url('mdocimages/pubdelegate.png'); }
+.breadcrumb li.pubenumeration { background-image: url('mdocimages/pubenumeration.png'); }
+.breadcrumb li.pubevent { background-image: url('mdocimages/pubevent.png'); }
+.breadcrumb li.pubextension { background-image: url('mdocimages/pubextension.png'); }
+.breadcrumb li.pubfield { background-image: url('mdocimages/pubfield.png'); }
+.breadcrumb li.pubinterface { background-image: url('mdocimages/pubinterface.png'); }
+.breadcrumb li.pubmethod { background-image: url('mdocimages/pubmethod.png'); }
+.breadcrumb li.pubproperty { background-image: url('mdocimages/pubproperty.png'); }
+.breadcrumb li.pubstructure { background-image: url('mdocimages/pubstructure.png'); }
+
+.breadcrumb li.protclass { background-image: url('mdocimages/protclass.png'); }
+.breadcrumb li.protdelegate { background-image: url('mdocimages/protdelegate.png'); }
+.breadcrumb li.protenumeration { background-image: url('mdocimages/protenumeration.png'); }
+.breadcrumb li.protevent { background-image: url('mdocimages/protevent.png'); }
+.breadcrumb li.protextension { background-image: url('mdocimages/protextension.png'); }
+.breadcrumb li.protfield { background-image: url('mdocimages/protfield.png'); }
+.breadcrumb li.protinterface { background-image: url('mdocimages/protinterface.png'); }
+.breadcrumb li.protmethod { background-image: url('mdocimages/protmethod.png'); }
+.breadcrumb li.protproperty { background-image: url('mdocimages/protproperty.png'); }
+.breadcrumb li.protstructure { background-image: url('mdocimages/protstructure.png'); }
+
+.breadcrumb li.privclass { background-image: url('mdocimages/privclass.png'); }
+.breadcrumb li.privdelegate { background-image: url('mdocimages/privdelegate.png'); }
+.breadcrumb li.privenumeration { background-image: url('mdocimages/privenumeration.png'); }
+.breadcrumb li.privevent { background-image: url('mdocimages/privevent.png'); }
+.breadcrumb li.privextension { background-image: url('mdocimages/privextension.png'); }
+.breadcrumb li.privfield { background-image: url('mdocimages/privfield.png'); }
+.breadcrumb li.privinterface { background-image: url('mdocimages/privinterface.png'); }
+.breadcrumb li.privmethod { background-image: url('mdocimages/privmethod.png'); }
+.breadcrumb li.privproperty { background-image: url('mdocimages/privproperty.png'); }
+.breadcrumb li.privstructure { background-image: url('mdocimages/privstructure.png'); }
+
+.breadcrumb li.namespace 
+{
+    padding-left: 26px;
+    background-image: url('mdocimages/namespace.png');
+}
+
+.breadcrumb li.reference 
+{
+    padding-left: 26px;
+    background-image: url('mdocimages/reference.png');
+}
+
+.breadcrumb li.members 
+{
+    padding-left: 24px;
+    background-image: url('mdocimages/members.png');
+}
+
+.breadcrumb li.home 
+{
+    padding-left: 31px;
+    background-image: url('mdocimages/house.png');
+    background-position: 8px center;
+}
+
+.breadcrumb li.help 
+{
+    background-image: url('mdocimages/help.png');
+}
+
+.breadcrumb li.unrecognized 
+{
+    background-image: url('mdocimages/error.png');
+}
+
+.breadcrumb a
+{
+    height: 25px;
+    display: block;
+    background-image: url('mdocimages/bc_separator.png');
+    background-repeat: no-repeat;
+    background-position: right;
+    padding-right: 15px;
+    color: #454545;
+    text-decoration: none;
+}
+
+.breadcrumb a:hover
+{
+    text-decoration: underline;
+}
+
+.clearer
+{
+    clear: both;
+}
+
+div.Signature {
+  border: 1px solid #C0C0C0;
+  background: #F2F2F2;
+  padding: 1em;
+  margin-left: 1em;
+}
+
+div.Content {
+    margin-left: 1em;
+}
+
+.SectionBox {
+    margin-left: 1em;
+}
+
+/* Salvaged from the old style */
+table.Documentation, table.Enumeration, table.TypeDocumentation {
+  border-collapse: collapse;
+  width: 100%;
+}
+
+table.Documentation tr th, table.TypeMembers tr th, table.Enumeration tr th, table.TypeDocumentation tr th {
+  background: whitesmoke;
+  padding: 0.8em;
+  border: 1px solid gray;
+  text-align: left;
+  vertical-align: bottom;
+}
+
+table.Documentation tr td, table.TypeMembers tr td, table.Enumeration tr td, table.TypeDocumentation tr td {
+  padding: 0.5em;
+  border: 1px solid gray;
+  text-align: left;
+  vertical-align: top;
+}
+
+table.TypeMembers {
+  border: 1px solid #C0C0C0;
+  width: 100%;
+}
+
+table.TypeMembers tr td {
+  background: #F8F8F8;
+  border: white;
+}
+
+span.NotEntered /* Documentation for this section has not yet been entered */ {
+       font-style: italic;
+       color: #aaa;    
+}
diff --git a/mcs/tools/monkeydoc/Resources/mono-ecma.xsl b/mcs/tools/monkeydoc/Resources/mono-ecma.xsl
new file mode 100644 (file)
index 0000000..76356d2
--- /dev/null
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+
+<!--
+	mono-ecma.xsl: ECMA-style docs to HTML stylesheet transformation
+
+       Author: Joshua Tauberer (tauberer@for.net)
+
+       TODO:
+               split this into multiple files
+-->
+
+<xsl:stylesheet
+       version="1.0"
+       xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+       xmlns:monodoc="monodoc:///extensions"
+       exclude-result-prefixes="monodoc"
+       >
+       <xsl:import href="mdoc-sections.xsl" />
+       <xsl:import href="mono-ecma-impl.xsl" />
+       
+       <xsl:output omit-xml-declaration="yes" />
+
+       <xsl:template name="CreateCodeBlock">
+               <xsl:param name="language" />
+               <xsl:param name="content" />
+               <table class="CodeExampleTable" bgcolor="#f5f5dd" border="1" cellpadding="5" width="100%">
+                       <tr><td><b><xsl:value-of select="$language"/> Example</b></td></tr>
+                       <tr>
+                               <td>
+                                       <!--
+                                       <xsl:value-of select="monodoc:Colorize($content, string($language))" 
+                                               disable-output-escaping="yes" />
+                                               -->
+                                       <pre>
+                                               <xsl:value-of select="$content" />
+                                       </pre>
+                               </td>
+                       </tr>
+               </table>
+       </xsl:template>
+
+</xsl:stylesheet>
diff --git a/mcs/tools/monkeydoc/Resources/toc-html.xsl b/mcs/tools/monkeydoc/Resources/toc-html.xsl
new file mode 100644 (file)
index 0000000..ea4d564
--- /dev/null
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8"?>
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+  <xsl:output method="html" />
+
+  <xsl:template match="/toc">
+       <table bgcolor="#b0c4de" width="100%" cellpadding="5"><tr><td><h3><xsl:value-of select="@title" /></h3></td></tr></table>
+       <xsl:apply-templates />
+  </xsl:template>
+
+  <xsl:template match="description">
+       <p><xsl:value-of select="." /></p>
+  </xsl:template>
+
+  <xsl:template match="list">
+       <ul>
+         <xsl:apply-templates />
+       </ul>
+  </xsl:template>
+
+  <xsl:template match="item">
+       <xsl:choose>
+         <xsl:when test="list">
+               <li>
+               <xsl:apply-templates select="list" />
+               </li>
+         </xsl:when>
+         <xsl:otherwise>
+               <li><a href="{@url}"><xsl:value-of select="." /></a></li>
+         </xsl:otherwise>
+       </xsl:choose>
+  </xsl:template>
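+
+  <!-- Illustrative input sketch (shape inferred from the templates above):
+
+       <toc title="Documentation contents">
+         <description>Intro text</description>
+         <list>
+           <item url="T:System.String">System.String Class</item>
+         </list>
+       </toc>
+  -->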
+</xsl:stylesheet>
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Test/Monkeydoc.Ecma/EcmaUrlTests.cs b/mcs/tools/monkeydoc/Test/Monkeydoc.Ecma/EcmaUrlTests.cs
new file mode 100644 (file)
index 0000000..8a9f3a7
--- /dev/null
@@ -0,0 +1,421 @@
+using System;
+using System.IO;
+using System.Linq;
+using System.Collections.Generic;
+
+using NUnit.Framework;
+
+using MonkeyDoc;
+using Monkeydoc.Ecma;
+
+namespace MonoTests.MonkeyDoc.Ecma
+{
+       [TestFixture]
+       public class EcmaUrlTests
+       {
+               EcmaUrlParser parser;
+
+               [SetUp]
+               public void Setup ()
+               {
+                       parser = new EcmaUrlParser ();
+               }
+               
+               void AssertValidUrl (string url)
+               {
+                       try {
+                               parser.IsValid (url);
+                       } catch {
+                               Assert.Fail (string.Format ("URL '{0}' deemed not valid", url));
+                       }
+               }
+
+               void AssertInvalidUrl (string url)
+               {
+                       try {
+                               parser.IsValid (url);
+                       } catch {
+                               return;
+                       }
+                       Assert.Fail (string.Format ("URL '{0}' deemed valid", url));
+               }
+
+               void AssertUrlDesc (EcmaDesc expected, string url)
+               {
+                       EcmaDesc actual = null;
+                       try {
+                               actual = parser.Parse (url);
+                       } catch (Exception e) {
+                               Assert.Fail (string.Format ("URL '{0}' deemed not valid: {1}{2}", url, Environment.NewLine, e.ToString ()));
+                       }
+
+                       Assert.AreEqual (expected, actual, "Converted URL differs");
+               }
+
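+		// The URLs below use the doc-link kind prefixes seen throughout
+		// monodoc: T: type, M: method, N: namespace, C: constructor.
+		// Nested types are written with '+', generic arity with '`N'.
+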
+               [Test]
+               public void CommonMethodUrlIsValidTest ()
+               {
+                       AssertValidUrl ("M:System.String.FooBar()");
+                       AssertValidUrl ("M:System.String.FooBar(System.String, Int32)");
+                       AssertValidUrl ("M:System.Foo.Int32<System.String+FooBar<System.Blop<T, U`2>>>.Foo()");
+                       AssertValidUrl ("M:System.Foo.Int32<System.String+FooBar<System.Blop<T, U`2>>>.Foo(Bleh,Bar)");
+                       AssertValidUrl ("M:System.Foo.Int32<System.String+FooBar<System.Blop<T, U`2>>>.Foo(Bleh<V>,Bar)");
+                       AssertValidUrl ("M:Gendarme.Framework.Helpers.Log.WriteLine(string,string,object[])");
+                       AssertValidUrl ("M:Mono.Security.X509.Extensions.SubjectKeyIdentifierExtension.Decode");
+                       AssertValidUrl ("M:Mono.Security.PKCS7.IssuerAndSerialNumber");
+               }
+
+               [Test]
+               public void CommonTypeUrlIsValidTest ()
+               {
+                       AssertValidUrl ("T:Int32");
+                       AssertValidUrl ("T:System.Foo.Int32");
+                       AssertValidUrl ("T:System.Foo.Int32<System.String+FooBar`1>");
+                       AssertValidUrl ("T:System.Foo.Int32<System.String+FooBar<System.Blop<T, U>>>");
+                       AssertValidUrl ("T:System.Foo.Int32<T>");
+                       AssertValidUrl ("T:System.Foo.Int32<T,U>");
+                       AssertValidUrl ("T:System.Foo.Int32<System.String+FooBar<System.Blop<T, U>>>");
+                       AssertValidUrl ("T:System.Foo.Int32<System.String+FooBar<System.Blop<T, U`2>>>");
+               }
+
+               [Test]
+               public void CommonTypeUrlNotValidTest ()
+               {
+                       AssertInvalidUrl ("TInt32");
+                       AssertInvalidUrl ("K:Int32");
+                       AssertInvalidUrl ("T:System..Foo.Int32");
+                       AssertInvalidUrl ("T:System.Foo.Int32<System.String+FooBar`1");
+                       AssertInvalidUrl ("T:System.Foo.Int32<System.String+FooBarSystem.Blop<T, U>>>");
+                       AssertInvalidUrl ("T:System.Foo.Int32<T,>");
+                       AssertInvalidUrl ("T:System.Foo.Int32<+FooBar<System.Blop<T, U>>>");
+               }
+
+               [Test]
+               public void NamespaceValidTest ()
+               {
+                       AssertValidUrl ("N:Foo.Bar");
+                       AssertValidUrl ("N:Foo");
+                       AssertValidUrl ("N:Foo.Bar.Baz");
+                       AssertValidUrl ("N:A.B.C");
+
+                       var ast = new EcmaDesc () { DescKind = EcmaDesc.Kind.Namespace,
+                                                   Namespace = "Foo.Bar.Blop" };
+                       AssertUrlDesc (ast, "N:Foo.Bar.Blop");
+               }
+
+               [Test]
+               public void ConstructorValidTest ()
+               {
+                       AssertValidUrl ("C:Gendarme.Rules.Concurrency.DecorateThreadsRule.DecorateThreadsRule");
+                       AssertValidUrl ("C:Gendarme.Rules.Concurrency.DecorateThreadsRule.DecorateThreadsRule()");
+                       AssertValidUrl ("C:Gendarme.Rules.Concurrency.DecorateThreadsRule.DecorateThreadsRule(System.String)");
+                       AssertValidUrl ("C:Gendarme.Framework.Helpers.MethodSignature.MethodSignature(string,string,string[],System.Func<Mono.Cecil.MethodReference,System.Boolean>)");
+                       AssertValidUrl ("C:System.Collections.Generic.Dictionary<TKey,TValue>+KeyCollection.KeyCollection(System.Collections.Generic.Dictionary<TKey,TValue>)");
+               }
+
+               [Test]
+               public void SlashExpressionValidTest ()
+               {
+                       AssertValidUrl ("T:Foo.Bar.Type/*");
+                       AssertValidUrl ("T:Foo.Bar.Type/M");
+                       AssertValidUrl ("T:Gendarme.Framework.Bitmask<T>/M/Equals");
+                       AssertValidUrl ("T:Gendarme.Framework.Helpers.Log/M/WriteLine<T>");
+                       AssertValidUrl ("T:System.Windows.Forms.AxHost/M/System.ComponentModel.ICustomTypeDescriptor.GetEvents");
+               }
+
+               [Test]
+               public void MethodWithArgModValidTest ()
+               {
+                       AssertValidUrl ("M:Foo.Bar.FooBar(int, System.Drawing.Imaging&)");
+                       AssertValidUrl ("M:Foo.Bar.FooBar(int@, System.Drawing.Imaging)");
+                       AssertValidUrl ("M:Foo.Bar.FooBar(int, System.Drawing.Imaging*)");
+                       AssertValidUrl ("M:Foo.Bar.FooBar(int*, System.Drawing.Imaging&)");
+                       AssertValidUrl ("M:Atk.NoOpObject.GetRunAttributes(int,int&,int&)");
+               }
+
+               [Test]
+               public void MethodWithJaggedArrayArgsValidTest ()
+               {
+                       AssertValidUrl ("M:System.Reflection.Emit.SignatureHelper.GetPropertySigHelper(System.Reflection.Module,System.Reflection.CallingConventions,Type,Type[],Type[],Type[],Type[][],Type[][])");
+               }
+
+               [Test]
+               public void MethodWithInnerTypeValidTest ()
+               {
+                       AssertValidUrl ("M:System.TimeZoneInfo+AdjustmentRule.CreateAdjustmentRule");
+               }
+
+               [Test]
+               public void FieldValidTest ()
+               {
+                       AssertValidUrl ("F:Mono.Terminal.Curses.KeyF10");
+                       AssertValidUrl ("F:Novell.Directory.Ldap.Utilclass.ExceptionMessages.NOT_IMPLEMENTED");
+                       AssertValidUrl ("F:Novell.Directory.Ldap.LdapException.NOT_ALLOWED_ON_NONLEAF");
+               }
+
+               [Test]
+               public void PropertyValidTest ()
+               {
+                       AssertValidUrl ("P:System.Foo.Bar");
+                       AssertValidUrl ("P:System.ArraySegment<T>.Array");
+               }
+
+               [Test]
+               public void IndexPropertyValidTest ()
+               {
+                       AssertValidUrl ("P:System.ComponentModel.PropertyDescriptorCollection.Item(int)");
+                       AssertValidUrl ("P:System.ComponentModel.AttributeCollection.Item(Type)");
+                       AssertValidUrl ("P:System.Web.SessionState.HttpSessionStateContainer$System.Web.SessionState.IHttpSessionState.Item(int)");
+                       AssertValidUrl ("P:System.Collections.Specialized.BitVector32.Item(System.Collections.Specialized.BitVector32+Section)");
+               }
+
+               [Test]
+               public void ExplicitMethodImplValidTest ()
+               {
+                       AssertValidUrl ("M:Microsoft.Win32.RegistryKey$System.IDisposable.Dispose");
+               }
+
+               [Test]
+               public void MetaEtcNodeTest ()
+               {
+                       var ast = new EcmaDesc () { DescKind = EcmaDesc.Kind.Type,
+                                                   Namespace = "Foo.Bar",
+                                                   TypeName = "Type",
+                                                   Etc = '*' };
+                       AssertUrlDesc (ast, "T:Foo.Bar.Type/*");
+               }
+
+               [Test]
+               public void MetaEtcWithInnerTypeTest ()
+               {
+                       var ast = new EcmaDesc () { DescKind = EcmaDesc.Kind.Type,
+                                                   Namespace = "Novell.Directory.Ldap",
+                                                   TypeName = "Connection",
+                                                   NestedType = new EcmaDesc { DescKind = EcmaDesc.Kind.Type, TypeName = "ReaderThread" },
+                                                   Etc = '*' };
+                       AssertUrlDesc (ast, "T:Novell.Directory.Ldap.Connection+ReaderThread/*");
+               }
+
+               [Test]
+               public void SimpleTypeUrlParseTest ()
+               {
+                       var ast = new EcmaDesc () { DescKind = EcmaDesc.Kind.Type,
+                                                   TypeName = "String",
+                                                   Namespace = "System" };
+                       AssertUrlDesc (ast, "T:System.String");
+               }
+
+               [Test]
+               public void TypeWithOneGenericUrlParseTest ()
+               {
+                       var generics = new[] {
+                               new EcmaDesc {
+                                       DescKind = EcmaDesc.Kind.Type,
+                                       TypeName = "T"
+                               }
+                       };
+                       var ast = new EcmaDesc () { DescKind = EcmaDesc.Kind.Type,
+                                                   TypeName = "String",
+                                                   Namespace = "System",
+                                                   GenericTypeArguments = generics,
+                       };
+
+                       AssertUrlDesc (ast, "T:System.String<T>");
+               }
+
+               [Test]
+               public void TypeWithNestedGenericUrlParseTest ()
+               {
+                       var generics = new[] {
+                               new EcmaDesc {
+                                       DescKind = EcmaDesc.Kind.Type,
+                                       TypeName = "T"
+                               },
+                               new EcmaDesc {
+                                       DescKind = EcmaDesc.Kind.Type,
+                                       Namespace = "System.Collections.Generic",
+                                       TypeName = "List",
+                                       GenericTypeArguments = new[] {
+                                               new EcmaDesc {
+                                                       DescKind = EcmaDesc.Kind.Type,
+                                                       TypeName = "V"
+                                               }
+                                       }
+                               }
+                       };
+                       var ast = new EcmaDesc () { DescKind = EcmaDesc.Kind.Type,
+                                                   TypeName = "String",
+                                                   Namespace = "System",
+                                                   GenericTypeArguments = generics,
+                       };
+
+                       AssertUrlDesc (ast, "T:System.String<T, System.Collections.Generic.List<V>>");
+               }
+
+               [Test]
+               public void SimpleMethodUrlParseTest ()
+               {
+                       var ast = new EcmaDesc () { DescKind = EcmaDesc.Kind.Method,
+                                                   TypeName = "String",
+                                                   Namespace = "System",
+                                                   MemberName = "FooBar"
+                       };
+                       AssertUrlDesc (ast, "M:System.String.FooBar()");
+               }
+
+               [Test]
+               public void MethodWithArgsUrlParseTest ()
+               {
+                       var args = new[] {
+                               new EcmaDesc {
+                                       DescKind = EcmaDesc.Kind.Type,
+                                       Namespace = "System",
+                                       TypeName = "String"
+                               },
+                               new EcmaDesc {
+                                       DescKind = EcmaDesc.Kind.Type,
+                                       TypeName = "Int32"
+                               }
+                       };
+                       var ast = new EcmaDesc () { DescKind = EcmaDesc.Kind.Method,
+                                                   TypeName = "String",
+                                                   Namespace = "System",
+                                                   MemberName = "FooBar",
+                                                   MemberArguments = args
+                       };
+                       AssertUrlDesc (ast, "M:System.String.FooBar(System.String, Int32)");
+               }
+
+               [Test]
+               public void MethodWithArgsAndGenericsUrlParseTest ()
+               {
+                       var args = new[] {
+                               new EcmaDesc {
+                                       DescKind = EcmaDesc.Kind.Type,
+                                       Namespace = "System",
+                                       TypeName = "String"
+                               },
+                               new EcmaDesc {
+                                       DescKind = EcmaDesc.Kind.Type,
+                                       Namespace = "System.Collections.Generic",
+                                       TypeName = "Dictionary",
+                                       GenericTypeArguments = new[] {
+                                               new EcmaDesc {
+                                                       DescKind = EcmaDesc.Kind.Type,
+                                                       TypeName = "K"
+                                               },
+                                               new EcmaDesc {
+                                                       DescKind = EcmaDesc.Kind.Type,
+                                                       TypeName = "V"
+                                               }
+                                       }
+                               }
+                       };
+
+                       var generics = new[] {
+                               new EcmaDesc {
+                                       DescKind = EcmaDesc.Kind.Type,
+                                       TypeName = "Action",
+                                       GenericTypeArguments = new[] {
+                                               new EcmaDesc {
+                                                       DescKind = EcmaDesc.Kind.Type,
+                                                       Namespace = "System",
+                                                       TypeName = "Single",
+                                               },
+                                               new EcmaDesc {
+                                                       DescKind = EcmaDesc.Kind.Type,
+                                                       TypeName = "int",
+                                               },
+                                       }
+                               }
+                       };
+
+                       var ast = new EcmaDesc () { DescKind = EcmaDesc.Kind.Method,
+                                                   TypeName = "String",
+                                                   Namespace = "System",
+                                                   MemberName = "FooBar",
+                                                   MemberArguments = args,
+                                                   GenericMemberArguments = generics
+                       };
+                       AssertUrlDesc (ast, "M:System.String.FooBar<Action<System.Single, int>>(System.String, System.Collections.Generic.Dictionary<K, V>)");
+               }
+
+               [Test]
+               public void ExplicitMethodImplementationParseTest ()
+               {
+                       var inner = new EcmaDesc {
+                               MemberName = "Dispose",
+                               TypeName = "IDisposable",
+                               Namespace = "System"
+                       };
+                       var ast = new EcmaDesc {
+                               DescKind = EcmaDesc.Kind.Method,
+                               TypeName = "RegistryKey",
+                               Namespace = "Microsoft.Win32",
+                               ExplicitImplMember = inner
+                       };
+                       AssertUrlDesc (ast, "M:Microsoft.Win32.RegistryKey$System.IDisposable.Dispose");
+                       var actual = parser.Parse ("M:Microsoft.Win32.RegistryKey$System.IDisposable.Dispose");
+                       Assert.IsNotNull (actual.ExplicitImplMember);
+                       Assert.AreEqual ("System.IDisposable.Dispose", actual.ToCompleteMemberName (EcmaDesc.Format.WithoutArgs));
+               }
+
+               [Test]
+               public void SimpleMethodWithNumberInType ()
+               {
+                       var ast = new EcmaDesc {
+                               DescKind = EcmaDesc.Kind.Method,
+                               TypeName = "ASN1",
+                               Namespace = "Mono.Security",
+                               MemberName = "Add"
+                       };
+                       AssertUrlDesc (ast, "M:Mono.Security.ASN1.Add");
+               }
+
+               [Test]
+               public void JaggedArrayWithDimensions ()
+               {
+                       var ast = new EcmaDesc {
+                               DescKind = EcmaDesc.Kind.Type,
+                               TypeName = "Int32",
+                               Namespace = "System",
+                               ArrayDimensions = new int[] { 3, 1, 1 }
+                       };
+                       AssertUrlDesc (ast, "T:System.Int32[,,][][]");
+               }
+
+               /*              [Test]
+               public void TreeParsabilityTest ()
+               {
+                       var rootTree = RootTree.LoadTree ("/home/jeremie/monodoc/");
+                       Node result;
+                       var generator = new CheckGenerator ();
+
+                       foreach (var leaf in GetLeaves (rootTree.RootNode).Where (IsEcmaNode))
+                               AssertUrl (leaf.PublicUrl);
+               }
+
+               IEnumerable<Node> GetLeaves (Node node)
+               {
+                       if (node == null)
+                               yield break;
+
+                       if (node.IsLeaf)
+                               yield return node;
+                       else {
+                               foreach (var child in node.Nodes) {
+                                       if (!string.IsNullOrEmpty (child.Element) && !child.Element.StartsWith ("root:/"))
+                                               yield return child;
+                                       foreach (var childLeaf in GetLeaves (child))
+                                               yield return childLeaf;
+                               }
+                       }
+               }
+
+               bool IsEcmaNode (Node node)
+               {
+                       var url = node.PublicUrl;
+                       return url != null && url.Length > 2 && url[1] == ':';
+               }*/
+       }
+}
\ No newline at end of file
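As a quick orientation to the surface these tests exercise (only members that already appear in the fixture above are used), a parse round-trip looks roughly like this:

using System;
using Monkeydoc.Ecma;

class EcmaUrlSketch
{
	static void Main ()
	{
		var parser = new EcmaUrlParser ();

		// Throws on malformed input, which is what AssertValidUrl/AssertInvalidUrl rely on.
		parser.IsValid ("T:System.String");

		// Parse builds the EcmaDesc AST that the assertions above compare against.
		EcmaDesc desc = parser.Parse ("M:System.String.FooBar(System.String, Int32)");
		Console.WriteLine (desc.DescKind);   // Method
		Console.WriteLine (desc.Namespace);  // System
		Console.WriteLine (desc.TypeName);   // String
		Console.WriteLine (desc.MemberName); // FooBar
	}
}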
diff --git a/mcs/tools/monkeydoc/Test/Monkeydoc/HelpSourceTests.cs b/mcs/tools/monkeydoc/Test/Monkeydoc/HelpSourceTests.cs
new file mode 100644 (file)
index 0000000..876f3f8
--- /dev/null
@@ -0,0 +1,99 @@
+using System;
+using System.IO;
+using System.Linq;
+using System.Collections.Generic;
+
+using NUnit.Framework;
+
+using MonkeyDoc;
+
+namespace MonoTests.MonkeyDoc
+{
+       [TestFixture]
+       public class HelpSourceTest
+       {
+               const string BaseDir = "../../tools/monkeydoc/Test/monodoc/";
+
+               class CheckGenerator : IDocGenerator<bool>
+               {
+                       public string LastCheckMessage { get; set; }
+
+                       public bool Generate (HelpSource hs, string id)
+                       {
+                               LastCheckMessage = string.Format ("#1 : {0} {1}", hs, id);
+                               if (hs == null || string.IsNullOrEmpty (id))
+                                       return false;
+
+                               // Strip the argument part since we don't need it
+                               var argIdx = id.LastIndexOf ('?');
+                               if (argIdx != -1)
+                                       id = id.Substring (0, argIdx);
+
+                               LastCheckMessage = string.Format ("#2 : {0} {1}", hs, id);
+                               if (hs.IsRawContent (id))
+                                       return hs.GetText (id) != null;
+
+                               IEnumerable<string> parts;
+                               if (hs.IsMultiPart (id, out parts)) {
+                                       LastCheckMessage = string.Format ("#3 : {0} {1} ({2})", hs, id, string.Join (", ", parts));
+                                       foreach (var partId in parts)
+                                               if (!Generate (hs, partId))
+                                                       return false;
+                               }
+
+                               LastCheckMessage = string.Format ("#4 : {0} {1}", hs, id);
+                               if (hs.IsGeneratedContent (id))
+                                       return hs.GetCachedText (id) != null;
+                               else {
+                                       var s = hs.GetCachedHelpStream (id);
+                                       if (s != null) {
+                                               s.Close ();
+                                               return true;
+                                       } else {
+                                               return false;
+                                       }
+                               }
+                       }
+               }
+
+               /* This test verifies that, for every node in our tree that has a PublicUrl,
+                * we can correctly access it back through RenderUrl
+                */
+               [Test]
+               public void ReachabilityTest ()
+               {
+                       var rootTree = RootTree.LoadTree (Path.GetFullPath (BaseDir), false);
+                       Node result;
+                       var generator = new CheckGenerator ();
+                       int errorCount = 0;
+                       int testCount = 0;
+
+                       foreach (var leaf in GetLeaves (rootTree.RootNode)) {
+                               if (!rootTree.RenderUrl (leaf.PublicUrl, generator, out result) || leaf != result) {
+                                       Console.WriteLine ("Error: " + leaf.PublicUrl);
+                                       errorCount++;
+                               }
+                               testCount++;
+                       }
+
+                       Assert.AreEqual (0, errorCount, errorCount + " / " + testCount.ToString ());
+               }
+
+               IEnumerable<Node> GetLeaves (Node node)
+               {
+                       if (node == null)
+                               yield break;
+
+                       if (node.IsLeaf)
+                               yield return node;
+                       else {
+                               foreach (var child in node.Nodes) {
+                                       if (!string.IsNullOrEmpty (child.Element) && !child.Element.StartsWith ("root:/"))
+                                               yield return child;
+                                       foreach (var childLeaf in GetLeaves (child))
+                                               yield return childLeaf;
+                               }
+                       }
+               }
+       }
+}
\ No newline at end of file
diff --git a/mcs/tools/monkeydoc/Test/monodoc/monodoc.xml b/mcs/tools/monkeydoc/Test/monodoc/monodoc.xml
new file mode 100644 (file)
index 0000000..94a4b22
--- /dev/null
@@ -0,0 +1,7 @@
+<?xml version="1.0"?>
+<node label="Mono Documentation" name="libraries">
+  <node label="Commands and Files" name="man" />
+  <node label="Languages" name="languages" />
+  <node label="Tools" name="tools" />
+  <node label="Various" name="various" />
+</node>
diff --git a/mcs/tools/monkeydoc/monkeydoc.dll.config.in b/mcs/tools/monkeydoc/monkeydoc.dll.config.in
new file mode 100644 (file)
index 0000000..251f778
--- /dev/null
@@ -0,0 +1,6 @@
+<config>
+        <appSettings>
+                <add key="docPath" value="@monodoc_refdir@" />
+                <add key="docExternalPath" value="" />
+        </appSettings>
+</config>
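A hedged sketch of consuming the generated settings; the file name below assumes the substituted file is installed as monkeydoc.dll.config next to the assembly, and this commit does not show the consuming code:

using System;
using System.Configuration;

class ConfigSketch
{
	static void Main ()
	{
		// Explicitly map the library's own config file rather than the app config.
		var map = new ExeConfigurationFileMap { ExeConfigFilename = "monkeydoc.dll.config" };
		Configuration config = ConfigurationManager.OpenMappedExeConfiguration (map, ConfigurationUserLevel.None);
		string docPath = config.AppSettings.Settings ["docPath"].Value;
		Console.WriteLine (docPath); // @monodoc_refdir@ after build-time substitution
	}
}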
diff --git a/mcs/tools/monkeydoc/monkeydoc.dll.sources b/mcs/tools/monkeydoc/monkeydoc.dll.sources
new file mode 100644 (file)
index 0000000..31ac254
--- /dev/null
@@ -0,0 +1,452 @@
+Assembly/AssemblyInfo.cs
+Monkeydoc/SearchableIndex.cs
+Monkeydoc/SearchableDocument.cs
+Monkeydoc/storage/ZipStorage.cs
+Monkeydoc/providers/man-provider.cs
+Monkeydoc/providers/ecmaspec-provider.cs
+Monkeydoc/providers/error-provider.cs
+Monkeydoc/providers/xhtml-provider.cs
+Monkeydoc/providers/ecma-provider.cs
+Monkeydoc/HelpSource.cs
+Monkeydoc/Tree.cs
+Monkeydoc/generator.cs
+Monkeydoc/caches/NullCache.cs
+Monkeydoc/caches/FileCache.cs
+Monkeydoc/storage.cs
+Monkeydoc/Provider.cs
+Monkeydoc/cache.cs
+Monkeydoc/index.cs
+Monkeydoc/RootTree.cs
+Monkeydoc/TypeUtils.cs
+Monkeydoc/generators/html/Man2Html.cs
+Monkeydoc/generators/html/Toc2Html.cs
+Monkeydoc/generators/html/Ecmaspec2Html.cs
+Monkeydoc/generators/html/Error2Html.cs
+Monkeydoc/generators/html/MonoBook2Html.cs
+Monkeydoc/generators/html/Ecma2Html.cs
+Monkeydoc/generators/html/Idem.cs
+Monkeydoc/generators/HtmlGenerator.cs
+Mono.Utilities/colorizer.cs
+Mono.Utilities/LRUCache.cs
+Monkeydoc.Ecma/EcmaUrlParser.cs
+Monkeydoc.Ecma/EcmaUrlTokenizer.cs
+Monkeydoc.Ecma/EcmaDesc.cs
+Mono.Documentation/ManifestResourceResolver.cs
+Mono.Documentation/XmlDocUtils.cs
+Lucene.Net/Lucene.Net/Analysis/Analyzer.cs
+Lucene.Net/Lucene.Net/Analysis/ASCIIFoldingFilter.cs
+Lucene.Net/Lucene.Net/Analysis/BaseCharFilter.cs
+Lucene.Net/Lucene.Net/Analysis/CachingTokenFilter.cs
+Lucene.Net/Lucene.Net/Analysis/CharacterCache.cs
+Lucene.Net/Lucene.Net/Analysis/CharArraySet.cs
+Lucene.Net/Lucene.Net/Analysis/CharFilter.cs
+Lucene.Net/Lucene.Net/Analysis/CharReader.cs
+Lucene.Net/Lucene.Net/Analysis/CharStream.cs
+Lucene.Net/Lucene.Net/Analysis/CharTokenizer.cs
+Lucene.Net/Lucene.Net/Analysis/ISOLatin1AccentFilter.cs
+Lucene.Net/Lucene.Net/Analysis/KeywordAnalyzer.cs
+Lucene.Net/Lucene.Net/Analysis/KeywordTokenizer.cs
+Lucene.Net/Lucene.Net/Analysis/LengthFilter.cs
+Lucene.Net/Lucene.Net/Analysis/LetterTokenizer.cs
+Lucene.Net/Lucene.Net/Analysis/LowerCaseFilter.cs
+Lucene.Net/Lucene.Net/Analysis/LowerCaseTokenizer.cs
+Lucene.Net/Lucene.Net/Analysis/MappingCharFilter.cs
+Lucene.Net/Lucene.Net/Analysis/NormalizeCharMap.cs
+Lucene.Net/Lucene.Net/Analysis/NumericTokenStream.cs
+Lucene.Net/Lucene.Net/Analysis/PerFieldAnalyzerWrapper.cs
+Lucene.Net/Lucene.Net/Analysis/PorterStemFilter.cs
+Lucene.Net/Lucene.Net/Analysis/PorterStemmer.cs
+Lucene.Net/Lucene.Net/Analysis/SimpleAnalyzer.cs
+Lucene.Net/Lucene.Net/Analysis/SinkTokenizer.cs
+Lucene.Net/Lucene.Net/Analysis/Standard/StandardAnalyzer.cs
+Lucene.Net/Lucene.Net/Analysis/Standard/StandardFilter.cs
+Lucene.Net/Lucene.Net/Analysis/Standard/StandardTokenizer.cs
+Lucene.Net/Lucene.Net/Analysis/Standard/StandardTokenizerImpl.cs
+Lucene.Net/Lucene.Net/Analysis/StopAnalyzer.cs
+Lucene.Net/Lucene.Net/Analysis/StopFilter.cs
+Lucene.Net/Lucene.Net/Analysis/TeeSinkTokenFilter.cs
+Lucene.Net/Lucene.Net/Analysis/TeeTokenFilter.cs
+Lucene.Net/Lucene.Net/Analysis/Token.cs
+Lucene.Net/Lucene.Net/Analysis/Tokenattributes/FlagsAttribute.cs
+Lucene.Net/Lucene.Net/Analysis/Tokenattributes/FlagsAttributeImpl.cs
+Lucene.Net/Lucene.Net/Analysis/Tokenattributes/OffsetAttribute.cs
+Lucene.Net/Lucene.Net/Analysis/Tokenattributes/OffsetAttributeImpl.cs
+Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PayloadAttribute.cs
+Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PayloadAttributeImpl.cs
+Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttribute.cs
+Lucene.Net/Lucene.Net/Analysis/Tokenattributes/PositionIncrementAttributeImpl.cs
+Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TermAttribute.cs
+Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TermAttributeImpl.cs
+Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TypeAttribute.cs
+Lucene.Net/Lucene.Net/Analysis/Tokenattributes/TypeAttributeImpl.cs
+Lucene.Net/Lucene.Net/Analysis/TokenFilter.cs
+Lucene.Net/Lucene.Net/Analysis/Tokenizer.cs
+Lucene.Net/Lucene.Net/Analysis/TokenStream.cs
+Lucene.Net/Lucene.Net/Analysis/TokenWrapper.cs
+Lucene.Net/Lucene.Net/Analysis/WhitespaceAnalyzer.cs
+Lucene.Net/Lucene.Net/Analysis/WhitespaceTokenizer.cs
+Lucene.Net/Lucene.Net/Analysis/WordlistLoader.cs
+Lucene.Net/Lucene.Net/Document/AbstractField.cs
+Lucene.Net/Lucene.Net/Document/CompressionTools.cs
+Lucene.Net/Lucene.Net/Document/DateField.cs
+Lucene.Net/Lucene.Net/Document/DateTools.cs
+Lucene.Net/Lucene.Net/Document/Document.cs
+Lucene.Net/Lucene.Net/Document/Field.cs
+Lucene.Net/Lucene.Net/Document/Fieldable.cs
+Lucene.Net/Lucene.Net/Document/FieldSelector.cs
+Lucene.Net/Lucene.Net/Document/FieldSelectorResult.cs
+Lucene.Net/Lucene.Net/Document/LoadFirstFieldSelector.cs
+Lucene.Net/Lucene.Net/Document/MapFieldSelector.cs
+Lucene.Net/Lucene.Net/Document/NumberTools.cs
+Lucene.Net/Lucene.Net/Document/NumericField.cs
+Lucene.Net/Lucene.Net/Document/SetBasedFieldSelector.cs
+Lucene.Net/Lucene.Net/Index/AbstractAllTermDocs.cs
+Lucene.Net/Lucene.Net/Index/AllTermDocs.cs
+Lucene.Net/Lucene.Net/Index/BufferedDeletes.cs
+Lucene.Net/Lucene.Net/Index/ByteBlockPool.cs
+Lucene.Net/Lucene.Net/Index/ByteSliceReader.cs
+Lucene.Net/Lucene.Net/Index/ByteSliceWriter.cs
+Lucene.Net/Lucene.Net/Index/CharBlockPool.cs
+Lucene.Net/Lucene.Net/Index/CheckIndex.cs
+Lucene.Net/Lucene.Net/Index/CompoundFileReader.cs
+Lucene.Net/Lucene.Net/Index/CompoundFileWriter.cs
+Lucene.Net/Lucene.Net/Index/ConcurrentMergeScheduler.cs
+Lucene.Net/Lucene.Net/Index/CorruptIndexException.cs
+Lucene.Net/Lucene.Net/Index/DefaultSkipListReader.cs
+Lucene.Net/Lucene.Net/Index/DefaultSkipListWriter.cs
+Lucene.Net/Lucene.Net/Index/DirectoryOwningReader.cs
+Lucene.Net/Lucene.Net/Index/DirectoryReader.cs
+Lucene.Net/Lucene.Net/Index/DocConsumer.cs
+Lucene.Net/Lucene.Net/Index/DocConsumerPerThread.cs
+Lucene.Net/Lucene.Net/Index/DocFieldConsumer.cs
+Lucene.Net/Lucene.Net/Index/DocFieldConsumerPerField.cs
+Lucene.Net/Lucene.Net/Index/DocFieldConsumerPerThread.cs
+Lucene.Net/Lucene.Net/Index/DocFieldConsumers.cs
+Lucene.Net/Lucene.Net/Index/DocFieldConsumersPerField.cs
+Lucene.Net/Lucene.Net/Index/DocFieldConsumersPerThread.cs
+Lucene.Net/Lucene.Net/Index/DocFieldProcessor.cs
+Lucene.Net/Lucene.Net/Index/DocFieldProcessorPerField.cs
+Lucene.Net/Lucene.Net/Index/DocFieldProcessorPerThread.cs
+Lucene.Net/Lucene.Net/Index/DocInverter.cs
+Lucene.Net/Lucene.Net/Index/DocInverterPerField.cs
+Lucene.Net/Lucene.Net/Index/DocInverterPerThread.cs
+Lucene.Net/Lucene.Net/Index/DocumentsWriter.cs
+Lucene.Net/Lucene.Net/Index/DocumentsWriterThreadState.cs
+Lucene.Net/Lucene.Net/Index/FieldInfo.cs
+Lucene.Net/Lucene.Net/Index/FieldInfos.cs
+Lucene.Net/Lucene.Net/Index/FieldInvertState.cs
+Lucene.Net/Lucene.Net/Index/FieldReaderException.cs
+Lucene.Net/Lucene.Net/Index/FieldSortedTermVectorMapper.cs
+Lucene.Net/Lucene.Net/Index/FieldsReader.cs
+Lucene.Net/Lucene.Net/Index/FieldsWriter.cs
+Lucene.Net/Lucene.Net/Index/FilterIndexReader.cs
+Lucene.Net/Lucene.Net/Index/FormatPostingsDocsConsumer.cs
+Lucene.Net/Lucene.Net/Index/FormatPostingsDocsWriter.cs
+Lucene.Net/Lucene.Net/Index/FormatPostingsFieldsConsumer.cs
+Lucene.Net/Lucene.Net/Index/FormatPostingsFieldsWriter.cs
+Lucene.Net/Lucene.Net/Index/FormatPostingsPositionsConsumer.cs
+Lucene.Net/Lucene.Net/Index/FormatPostingsPositionsWriter.cs
+Lucene.Net/Lucene.Net/Index/FormatPostingsTermsConsumer.cs
+Lucene.Net/Lucene.Net/Index/FormatPostingsTermsWriter.cs
+Lucene.Net/Lucene.Net/Index/FreqProxFieldMergeState.cs
+Lucene.Net/Lucene.Net/Index/FreqProxTermsWriter.cs
+Lucene.Net/Lucene.Net/Index/FreqProxTermsWriterPerField.cs
+Lucene.Net/Lucene.Net/Index/FreqProxTermsWriterPerThread.cs
+Lucene.Net/Lucene.Net/Index/IndexCommit.cs
+Lucene.Net/Lucene.Net/Index/IndexCommitPoint.cs
+Lucene.Net/Lucene.Net/Index/IndexDeletionPolicy.cs
+Lucene.Net/Lucene.Net/Index/IndexFileDeleter.cs
+Lucene.Net/Lucene.Net/Index/IndexFileNameFilter.cs
+Lucene.Net/Lucene.Net/Index/IndexFileNames.cs
+Lucene.Net/Lucene.Net/Index/IndexModifier.cs
+Lucene.Net/Lucene.Net/Index/IndexReader.cs
+Lucene.Net/Lucene.Net/Index/IndexWriter.cs
+Lucene.Net/Lucene.Net/Index/IntBlockPool.cs
+Lucene.Net/Lucene.Net/Index/InvertedDocConsumer.cs
+Lucene.Net/Lucene.Net/Index/InvertedDocConsumerPerField.cs
+Lucene.Net/Lucene.Net/Index/InvertedDocConsumerPerThread.cs
+Lucene.Net/Lucene.Net/Index/InvertedDocEndConsumer.cs
+Lucene.Net/Lucene.Net/Index/InvertedDocEndConsumerPerField.cs
+Lucene.Net/Lucene.Net/Index/InvertedDocEndConsumerPerThread.cs
+Lucene.Net/Lucene.Net/Index/KeepOnlyLastCommitDeletionPolicy.cs
+Lucene.Net/Lucene.Net/Index/LogByteSizeMergePolicy.cs
+Lucene.Net/Lucene.Net/Index/LogDocMergePolicy.cs
+Lucene.Net/Lucene.Net/Index/LogMergePolicy.cs
+Lucene.Net/Lucene.Net/Index/MergeDocIDRemapper.cs
+Lucene.Net/Lucene.Net/Index/MergePolicy.cs
+Lucene.Net/Lucene.Net/Index/MergeScheduler.cs
+Lucene.Net/Lucene.Net/Index/MultiLevelSkipListReader.cs
+Lucene.Net/Lucene.Net/Index/MultiLevelSkipListWriter.cs
+Lucene.Net/Lucene.Net/Index/MultipleTermPositions.cs
+Lucene.Net/Lucene.Net/Index/MultiReader.cs
+Lucene.Net/Lucene.Net/Index/NormsWriter.cs
+Lucene.Net/Lucene.Net/Index/NormsWriterPerField.cs
+Lucene.Net/Lucene.Net/Index/NormsWriterPerThread.cs
+Lucene.Net/Lucene.Net/Index/ParallelReader.cs
+Lucene.Net/Lucene.Net/Index/Payload.cs
+Lucene.Net/Lucene.Net/Index/PositionBasedTermVectorMapper.cs
+Lucene.Net/Lucene.Net/Index/RawPostingList.cs
+Lucene.Net/Lucene.Net/Index/ReadOnlyDirectoryReader.cs
+Lucene.Net/Lucene.Net/Index/ReadOnlySegmentReader.cs
+Lucene.Net/Lucene.Net/Index/ReusableStringReader.cs
+Lucene.Net/Lucene.Net/Index/SegmentInfo.cs
+Lucene.Net/Lucene.Net/Index/SegmentInfos.cs
+Lucene.Net/Lucene.Net/Index/SegmentMergeInfo.cs
+Lucene.Net/Lucene.Net/Index/SegmentMergeQueue.cs
+Lucene.Net/Lucene.Net/Index/SegmentMerger.cs
+Lucene.Net/Lucene.Net/Index/SegmentReader.cs
+Lucene.Net/Lucene.Net/Index/SegmentTermDocs.cs
+Lucene.Net/Lucene.Net/Index/SegmentTermEnum.cs
+Lucene.Net/Lucene.Net/Index/SegmentTermPositions.cs
+Lucene.Net/Lucene.Net/Index/SegmentTermPositionVector.cs
+Lucene.Net/Lucene.Net/Index/SegmentTermVector.cs
+Lucene.Net/Lucene.Net/Index/SegmentWriteState.cs
+Lucene.Net/Lucene.Net/Index/SerialMergeScheduler.cs
+Lucene.Net/Lucene.Net/Index/SnapshotDeletionPolicy.cs
+Lucene.Net/Lucene.Net/Index/SortedTermVectorMapper.cs
+Lucene.Net/Lucene.Net/Index/StaleReaderException.cs
+Lucene.Net/Lucene.Net/Index/StoredFieldsWriter.cs
+Lucene.Net/Lucene.Net/Index/StoredFieldsWriterPerThread.cs
+Lucene.Net/Lucene.Net/Index/Term.cs
+Lucene.Net/Lucene.Net/Index/TermBuffer.cs
+Lucene.Net/Lucene.Net/Index/TermDocs.cs
+Lucene.Net/Lucene.Net/Index/TermEnum.cs
+Lucene.Net/Lucene.Net/Index/TermFreqVector.cs
+Lucene.Net/Lucene.Net/Index/TermInfo.cs
+Lucene.Net/Lucene.Net/Index/TermInfosReader.cs
+Lucene.Net/Lucene.Net/Index/TermInfosWriter.cs
+Lucene.Net/Lucene.Net/Index/TermPositions.cs
+Lucene.Net/Lucene.Net/Index/TermPositionVector.cs
+Lucene.Net/Lucene.Net/Index/TermsHash.cs
+Lucene.Net/Lucene.Net/Index/TermsHashConsumer.cs
+Lucene.Net/Lucene.Net/Index/TermsHashConsumerPerField.cs
+Lucene.Net/Lucene.Net/Index/TermsHashConsumerPerThread.cs
+Lucene.Net/Lucene.Net/Index/TermsHashPerField.cs
+Lucene.Net/Lucene.Net/Index/TermsHashPerThread.cs
+Lucene.Net/Lucene.Net/Index/TermVectorEntry.cs
+Lucene.Net/Lucene.Net/Index/TermVectorEntryFreqSortedComparator.cs
+Lucene.Net/Lucene.Net/Index/TermVectorMapper.cs
+Lucene.Net/Lucene.Net/Index/TermVectorOffsetInfo.cs
+Lucene.Net/Lucene.Net/Index/TermVectorsReader.cs
+Lucene.Net/Lucene.Net/Index/TermVectorsTermsWriter.cs
+Lucene.Net/Lucene.Net/Index/TermVectorsTermsWriterPerField.cs
+Lucene.Net/Lucene.Net/Index/TermVectorsTermsWriterPerThread.cs
+Lucene.Net/Lucene.Net/Index/TermVectorsWriter.cs
+Lucene.Net/Lucene.Net/LucenePackage.cs
+Lucene.Net/Lucene.Net/LZOCompressor.cs
+Lucene.Net/Lucene.Net/Messages/Message.cs
+Lucene.Net/Lucene.Net/Messages/MessageImpl.cs
+Lucene.Net/Lucene.Net/Messages/NLS.cs
+Lucene.Net/Lucene.Net/Messages/NLSException.cs
+Lucene.Net/Lucene.Net/QueryParser/CharStream.cs
+Lucene.Net/Lucene.Net/QueryParser/FastCharStream.cs
+Lucene.Net/Lucene.Net/QueryParser/MultiFieldQueryParser.cs
+Lucene.Net/Lucene.Net/QueryParser/ParseException.cs
+Lucene.Net/Lucene.Net/QueryParser/QueryParser.cs
+Lucene.Net/Lucene.Net/QueryParser/QueryParserConstants.cs
+Lucene.Net/Lucene.Net/QueryParser/QueryParserTokenManager.cs
+Lucene.Net/Lucene.Net/QueryParser/Token.cs
+Lucene.Net/Lucene.Net/QueryParser/TokenMgrError.cs
+Lucene.Net/Lucene.Net/Search/BooleanClause.cs
+Lucene.Net/Lucene.Net/Search/BooleanQuery.cs
+Lucene.Net/Lucene.Net/Search/BooleanScorer.cs
+Lucene.Net/Lucene.Net/Search/BooleanScorer2.cs
+Lucene.Net/Lucene.Net/Search/CachingSpanFilter.cs
+Lucene.Net/Lucene.Net/Search/CachingWrapperFilter.cs
+Lucene.Net/Lucene.Net/Search/Collector.cs
+Lucene.Net/Lucene.Net/Search/ComplexExplanation.cs
+Lucene.Net/Lucene.Net/Search/ConjunctionScorer.cs
+Lucene.Net/Lucene.Net/Search/ConstantScoreQuery.cs
+Lucene.Net/Lucene.Net/Search/ConstantScoreRangeQuery.cs
+Lucene.Net/Lucene.Net/Search/DefaultSimilarity.cs
+Lucene.Net/Lucene.Net/Search/DisjunctionMaxQuery.cs
+Lucene.Net/Lucene.Net/Search/DisjunctionMaxScorer.cs
+Lucene.Net/Lucene.Net/Search/DisjunctionSumScorer.cs
+Lucene.Net/Lucene.Net/Search/DocIdSet.cs
+Lucene.Net/Lucene.Net/Search/DocIdSetIterator.cs
+Lucene.Net/Lucene.Net/Search/ExactPhraseScorer.cs
+Lucene.Net/Lucene.Net/Search/Explanation.cs
+Lucene.Net/Lucene.Net/Search/ExtendedFieldCache.cs
+Lucene.Net/Lucene.Net/Search/FieldCache.cs
+Lucene.Net/Lucene.Net/Search/FieldCacheImpl.cs
+Lucene.Net/Lucene.Net/Search/FieldCacheRangeFilter.cs
+Lucene.Net/Lucene.Net/Search/FieldCacheTermsFilter.cs
+Lucene.Net/Lucene.Net/Search/FieldComparator.cs
+Lucene.Net/Lucene.Net/Search/FieldComparatorSource.cs
+Lucene.Net/Lucene.Net/Search/FieldDoc.cs
+Lucene.Net/Lucene.Net/Search/FieldDocSortedHitQueue.cs
+Lucene.Net/Lucene.Net/Search/FieldSortedHitQueue.cs
+Lucene.Net/Lucene.Net/Search/FieldValueHitQueue.cs
+Lucene.Net/Lucene.Net/Search/Filter.cs
+Lucene.Net/Lucene.Net/Search/FilteredDocIdSet.cs
+Lucene.Net/Lucene.Net/Search/FilteredDocIdSetIterator.cs
+Lucene.Net/Lucene.Net/Search/FilteredQuery.cs
+Lucene.Net/Lucene.Net/Search/FilteredTermEnum.cs
+Lucene.Net/Lucene.Net/Search/FilterManager.cs
+Lucene.Net/Lucene.Net/Search/Function/ByteFieldSource.cs
+Lucene.Net/Lucene.Net/Search/Function/CustomScoreProvider.cs
+Lucene.Net/Lucene.Net/Search/Function/CustomScoreQuery.cs
+Lucene.Net/Lucene.Net/Search/Function/DocValues.cs
+Lucene.Net/Lucene.Net/Search/Function/FieldCacheSource.cs
+Lucene.Net/Lucene.Net/Search/Function/FieldScoreQuery.cs
+Lucene.Net/Lucene.Net/Search/Function/FloatFieldSource.cs
+Lucene.Net/Lucene.Net/Search/Function/IntFieldSource.cs
+Lucene.Net/Lucene.Net/Search/Function/MultiValueSource.cs
+Lucene.Net/Lucene.Net/Search/Function/OrdFieldSource.cs
+Lucene.Net/Lucene.Net/Search/Function/ReverseOrdFieldSource.cs
+Lucene.Net/Lucene.Net/Search/Function/ShortFieldSource.cs
+Lucene.Net/Lucene.Net/Search/Function/ValueSource.cs
+Lucene.Net/Lucene.Net/Search/Function/ValueSourceQuery.cs
+Lucene.Net/Lucene.Net/Search/FuzzyQuery.cs
+Lucene.Net/Lucene.Net/Search/FuzzyTermEnum.cs
+Lucene.Net/Lucene.Net/Search/Hit.cs
+Lucene.Net/Lucene.Net/Search/HitCollector.cs
+Lucene.Net/Lucene.Net/Search/HitCollectorWrapper.cs
+Lucene.Net/Lucene.Net/Search/HitIterator.cs
+Lucene.Net/Lucene.Net/Search/HitQueue.cs
+Lucene.Net/Lucene.Net/Search/Hits.cs
+Lucene.Net/Lucene.Net/Search/IndexSearcher.cs
+Lucene.Net/Lucene.Net/Search/MatchAllDocsQuery.cs
+Lucene.Net/Lucene.Net/Search/MultiPhraseQuery.cs
+Lucene.Net/Lucene.Net/Search/MultiSearcher.cs
+Lucene.Net/Lucene.Net/Search/MultiTermQuery.cs
+Lucene.Net/Lucene.Net/Search/MultiTermQueryWrapperFilter.cs
+Lucene.Net/Lucene.Net/Search/NumericRangeFilter.cs
+Lucene.Net/Lucene.Net/Search/NumericRangeQuery.cs
+Lucene.Net/Lucene.Net/Search/ParallelMultiSearcher.cs
+Lucene.Net/Lucene.Net/Search/Payloads/AveragePayloadFunction.cs
+Lucene.Net/Lucene.Net/Search/Payloads/BoostingTermQuery.cs
+Lucene.Net/Lucene.Net/Search/Payloads/MaxPayloadFunction.cs
+Lucene.Net/Lucene.Net/Search/Payloads/MinPayloadFunction.cs
+Lucene.Net/Lucene.Net/Search/Payloads/PayloadFunction.cs
+Lucene.Net/Lucene.Net/Search/Payloads/PayloadNearQuery.cs
+Lucene.Net/Lucene.Net/Search/Payloads/PayloadSpanUtil.cs
+Lucene.Net/Lucene.Net/Search/Payloads/PayloadTermQuery.cs
+Lucene.Net/Lucene.Net/Search/PhrasePositions.cs
+Lucene.Net/Lucene.Net/Search/PhraseQuery.cs
+Lucene.Net/Lucene.Net/Search/PhraseQueue.cs
+Lucene.Net/Lucene.Net/Search/PhraseScorer.cs
+Lucene.Net/Lucene.Net/Search/PositiveScoresOnlyCollector.cs
+Lucene.Net/Lucene.Net/Search/PrefixFilter.cs
+Lucene.Net/Lucene.Net/Search/PrefixQuery.cs
+Lucene.Net/Lucene.Net/Search/PrefixTermEnum.cs
+Lucene.Net/Lucene.Net/Search/Query.cs
+Lucene.Net/Lucene.Net/Search/QueryFilter.cs
+Lucene.Net/Lucene.Net/Search/QueryTermVector.cs
+Lucene.Net/Lucene.Net/Search/QueryWrapperFilter.cs
+Lucene.Net/Lucene.Net/Search/RangeFilter.cs
+Lucene.Net/Lucene.Net/Search/RangeQuery.cs
+Lucene.Net/Lucene.Net/Search/ReqExclScorer.cs
+Lucene.Net/Lucene.Net/Search/ReqOptSumScorer.cs
+Lucene.Net/Lucene.Net/Search/ScoreCachingWrappingScorer.cs
+Lucene.Net/Lucene.Net/Search/ScoreDoc.cs
+Lucene.Net/Lucene.Net/Search/ScoreDocComparator.cs
+Lucene.Net/Lucene.Net/Search/Scorer.cs
+Lucene.Net/Lucene.Net/Search/Searchable.cs
+Lucene.Net/Lucene.Net/Search/Searcher.cs
+Lucene.Net/Lucene.Net/Search/Similarity.cs
+Lucene.Net/Lucene.Net/Search/SimilarityDelegator.cs
+Lucene.Net/Lucene.Net/Search/SloppyPhraseScorer.cs
+Lucene.Net/Lucene.Net/Search/Sort.cs
+Lucene.Net/Lucene.Net/Search/SortComparator.cs
+Lucene.Net/Lucene.Net/Search/SortComparatorSource.cs
+Lucene.Net/Lucene.Net/Search/SortField.cs
+Lucene.Net/Lucene.Net/Search/SpanFilter.cs
+Lucene.Net/Lucene.Net/Search/SpanFilterResult.cs
+Lucene.Net/Lucene.Net/Search/SpanQueryFilter.cs
+Lucene.Net/Lucene.Net/Search/Spans/FieldMaskingSpanQuery.cs
+Lucene.Net/Lucene.Net/Search/Spans/NearSpansOrdered.cs
+Lucene.Net/Lucene.Net/Search/Spans/NearSpansUnordered.cs
+Lucene.Net/Lucene.Net/Search/Spans/SpanFirstQuery.cs
+Lucene.Net/Lucene.Net/Search/Spans/SpanNearQuery.cs
+Lucene.Net/Lucene.Net/Search/Spans/SpanNotQuery.cs
+Lucene.Net/Lucene.Net/Search/Spans/SpanOrQuery.cs
+Lucene.Net/Lucene.Net/Search/Spans/SpanQuery.cs
+Lucene.Net/Lucene.Net/Search/Spans/Spans.cs
+Lucene.Net/Lucene.Net/Search/Spans/SpanScorer.cs
+Lucene.Net/Lucene.Net/Search/Spans/SpanTermQuery.cs
+Lucene.Net/Lucene.Net/Search/Spans/SpanWeight.cs
+Lucene.Net/Lucene.Net/Search/Spans/TermSpans.cs
+Lucene.Net/Lucene.Net/Search/TermQuery.cs
+Lucene.Net/Lucene.Net/Search/TermRangeFilter.cs
+Lucene.Net/Lucene.Net/Search/TermRangeQuery.cs
+Lucene.Net/Lucene.Net/Search/TermRangeTermEnum.cs
+Lucene.Net/Lucene.Net/Search/TermScorer.cs
+Lucene.Net/Lucene.Net/Search/TimeLimitedCollector.cs
+Lucene.Net/Lucene.Net/Search/TimeLimitingCollector.cs
+Lucene.Net/Lucene.Net/Search/TopDocCollector.cs
+Lucene.Net/Lucene.Net/Search/TopDocs.cs
+Lucene.Net/Lucene.Net/Search/TopDocsCollector.cs
+Lucene.Net/Lucene.Net/Search/TopFieldCollector.cs
+Lucene.Net/Lucene.Net/Search/TopFieldDocCollector.cs
+Lucene.Net/Lucene.Net/Search/TopFieldDocs.cs
+Lucene.Net/Lucene.Net/Search/TopScoreDocCollector.cs
+Lucene.Net/Lucene.Net/Search/Weight.cs
+Lucene.Net/Lucene.Net/Search/WildcardQuery.cs
+Lucene.Net/Lucene.Net/Search/WildcardTermEnum.cs
+Lucene.Net/Lucene.Net/Store/AlreadyClosedException.cs
+Lucene.Net/Lucene.Net/Store/BufferedIndexInput.cs
+Lucene.Net/Lucene.Net/Store/BufferedIndexOutput.cs
+Lucene.Net/Lucene.Net/Store/CheckSumIndexInput.cs
+Lucene.Net/Lucene.Net/Store/CheckSumIndexOutput.cs
+Lucene.Net/Lucene.Net/Store/Directory.cs
+Lucene.Net/Lucene.Net/Store/FileSwitchDirectory.cs
+Lucene.Net/Lucene.Net/Store/FSDirectory.cs
+Lucene.Net/Lucene.Net/Store/FSLockFactory.cs
+Lucene.Net/Lucene.Net/Store/IndexInput.cs
+Lucene.Net/Lucene.Net/Store/IndexOutput.cs
+Lucene.Net/Lucene.Net/Store/Lock.cs
+Lucene.Net/Lucene.Net/Store/LockFactory.cs
+Lucene.Net/Lucene.Net/Store/LockObtainFailedException.cs
+Lucene.Net/Lucene.Net/Store/LockReleaseFailedException.cs
+Lucene.Net/Lucene.Net/Store/LockStressTest.cs
+Lucene.Net/Lucene.Net/Store/LockVerifyServer.cs
+Lucene.Net/Lucene.Net/Store/MMapDirectory.cs
+Lucene.Net/Lucene.Net/Store/NativeFSLockFactory.cs
+Lucene.Net/Lucene.Net/Store/NIOFSDirectory.cs
+Lucene.Net/Lucene.Net/Store/NoLockFactory.cs
+Lucene.Net/Lucene.Net/Store/NoSuchDirectoryException.cs
+Lucene.Net/Lucene.Net/Store/RAMDirectory.cs
+Lucene.Net/Lucene.Net/Store/RAMFile.cs
+Lucene.Net/Lucene.Net/Store/RAMInputStream.cs
+Lucene.Net/Lucene.Net/Store/RAMOutputStream.cs
+Lucene.Net/Lucene.Net/Store/SimpleFSDirectory.cs
+Lucene.Net/Lucene.Net/Store/SimpleFSLockFactory.cs
+Lucene.Net/Lucene.Net/Store/SingleInstanceLockFactory.cs
+Lucene.Net/Lucene.Net/Store/VerifyingLockFactory.cs
+Lucene.Net/Lucene.Net/SupportClass.cs
+Lucene.Net/Lucene.Net/Util/ArrayUtil.cs
+Lucene.Net/Lucene.Net/Util/Attribute.cs
+Lucene.Net/Lucene.Net/Util/AttributeImpl.cs
+Lucene.Net/Lucene.Net/Util/AttributeSource.cs
+Lucene.Net/Lucene.Net/Util/AverageGuessMemoryModel.cs
+Lucene.Net/Lucene.Net/Util/BitUtil.cs
+Lucene.Net/Lucene.Net/Util/BitVector.cs
+Lucene.Net/Lucene.Net/Util/Cache/Cache.cs
+Lucene.Net/Lucene.Net/Util/Cache/SimpleLRUCache.cs
+Lucene.Net/Lucene.Net/Util/Cache/SimpleMapCache.cs
+Lucene.Net/Lucene.Net/Util/CloseableThreadLocal.cs
+Lucene.Net/Lucene.Net/Util/Constants.cs
+Lucene.Net/Lucene.Net/Util/DocIdBitSet.cs
+Lucene.Net/Lucene.Net/Util/FieldCacheSanityChecker.cs
+Lucene.Net/Lucene.Net/Util/IndexableBinaryStringTools.cs
+Lucene.Net/Lucene.Net/Util/MapOfSets.cs
+Lucene.Net/Lucene.Net/Util/MemoryModel.cs
+Lucene.Net/Lucene.Net/Util/NumericUtils.cs
+Lucene.Net/Lucene.Net/Util/OpenBitSet.cs
+Lucene.Net/Lucene.Net/Util/OpenBitSetDISI.cs
+Lucene.Net/Lucene.Net/Util/OpenBitSetIterator.cs
+Lucene.Net/Lucene.Net/Util/Parameter.cs
+Lucene.Net/Lucene.Net/Util/PriorityQueue.cs
+Lucene.Net/Lucene.Net/Util/RamUsageEstimator.cs
+Lucene.Net/Lucene.Net/Util/ReaderUtil.cs
+Lucene.Net/Lucene.Net/Util/ScorerDocQueue.cs
+Lucene.Net/Lucene.Net/Util/SimpleStringInterner.cs
+Lucene.Net/Lucene.Net/Util/SmallFloat.cs
+Lucene.Net/Lucene.Net/Util/SortedVIntList.cs
+Lucene.Net/Lucene.Net/Util/SorterTemplate.cs
+Lucene.Net/Lucene.Net/Util/StringHelper.cs
+Lucene.Net/Lucene.Net/Util/StringInterner.cs
+Lucene.Net/Lucene.Net/Util/ToStringUtils.cs
+Lucene.Net/Lucene.Net/Util/UnicodeUtil.cs
+Lucene.Net/Lucene.Net/Util/Version.cs
diff --git a/mcs/tools/monkeydoc/monkeydoc_test.dll.sources b/mcs/tools/monkeydoc/monkeydoc_test.dll.sources
new file mode 100644 (file)
index 0000000..d57ee1e
--- /dev/null
@@ -0,0 +1,2 @@
+Monkeydoc/HelpSourceTests.cs
+Monkeydoc.Ecma/EcmaUrlTests.cs
\ No newline at end of file
index 0033472a8ad0104c41c6f9922d1100770d22c7cc..e0e52691b17fc19ae98151c65717519b391b951a 100644 (file)
@@ -845,7 +845,7 @@ public class EcmaHelpSource : HelpSource {
                
                if ((membername == "op_Implicit" || membername == "op_Explicit") && argtypes.Length == 2) {
                        isoperator = true;
-                       membername = "Conversion";
+                       membername = membername.EndsWith ("Implicit") ? "ImplicitConversion" : "ExplicitConversion";
                        member = argtypes[0] + " to " + argtypes[1];
                } else if (membername.StartsWith("op_")) {
                        isoperator = true;
@@ -991,7 +991,7 @@ public class EcmaHelpSource : HelpSource {
                        // conversion operators: overloading based on parameter and return type [ECMA-335 §10.3.3]
                        case "op_Implicit":                    // static implicit operator R (T)
                        case "op_Explicit":                    // static explicit operator R (T)
-                               nicename = "Conversion";
+                               nicename = name.EndsWith ("Implicit") ? "ImplicitConversion" : "ExplicitConversion";
                                string arg = n.SelectSingleNode("Parameters/Parameter/@Type").InnerText;
                                string ret = n.SelectSingleNode("ReturnValue/ReturnType").InnerText;
                                sig = EcmaDoc.ConvertCTSName(arg) + " to " + EcmaDoc.ConvertCTSName(ret);
@@ -2183,8 +2183,8 @@ public class EcmaHelpSource : HelpSource {
                                                                .Concat (ncnodes.Where (n => n.Nodes.Count > 0).SelectMany (n => n.Nodes.Cast<Node> ()));
                                                } else if (c.Caption == "Operators") {
                                                        ncnodes = ncnodes
-                                                               .Where (n => n.Caption != "Conversion")
-                                                               .Concat (ncnodes.Where (n => n.Caption == "Conversion").SelectMany (n => n.Nodes.Cast<Node> ()));
+                                                               .Where (n => !n.Caption.EndsWith ("Conversion"))
+                                                               .Concat (ncnodes.Where (n => n.Caption.EndsWith ("Conversion")).SelectMany (n => n.Nodes.Cast<Node> ()));
                                                }
                                                foreach (Node nc in ncnodes) {
                                                        //xpath to the docs xml node
index d79ce079e192b68c007443df35e503136f3997f1..180ae0d1f4eb2c6b158cba2780a5221680099aca 100644 (file)
@@ -132,7 +132,6 @@ public class Node : IComparable {
        Node parent;
        protected ArrayList nodes;
        protected internal int position;
-       string compare_key;
 
        static ArrayList empty = ArrayList.ReadOnly(new ArrayList(0));
 
@@ -394,19 +393,25 @@ public class Node : IComparable {
                        LoadNode ();
                if (other.position < 0)
                        other.LoadNode ();
-               if (compare_key == null || other.compare_key == null) {
-                       Regex digits = new Regex (@"([\d]+)|([^\d]+)");
-                       MatchEvaluator eval = delegate (Match m) {
-                               return (m.Value.Length > 0 && char.IsDigit (m.Value [0]))
-                               ? m.Value.PadLeft (System.Math.Max (caption.Length, other.caption.Length)) 
-                               : m.Value;
-                       };
-                       if (compare_key == null)
-                               compare_key = digits.Replace (caption, eval);
-                       if (other.compare_key == null)
-                               other.compare_key = digits.Replace (other.caption, eval);
-               }
-               return compare_key.CompareTo (other.compare_key);
+
+               var cap1 = caption;
+               var cap2 = other.caption;
+
+                       /* Some nodes (notably from ecmaspec) have numbers prepended to them,
+                        * which we need to sort properly by padding them to the same number
+                        * of digits.
+                        */
+               if (char.IsDigit (cap1[0]) && char.IsDigit (cap2[0])) {
+                       int c1 = cap1.TakeWhile (char.IsDigit).Count ();
+                       int c2 = cap2.TakeWhile (char.IsDigit).Count ();
+
+                       if (c1 != c2) {
+                               cap1 = cap1.PadLeft (cap1.Length + Math.Max (0, c2 - c1), '0');
+                               cap2 = cap2.PadLeft (cap2.Length + Math.Max (0, c1 - c2), '0');
+                       }
+               }
+
+               return string.Compare (cap1, cap2, StringComparison.OrdinalIgnoreCase);
        }
 }
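A standalone sketch of the numeric-prefix padding the new CompareTo uses (the class and helper names are illustrative, not part of the patch): when both captions start with digits, the shorter digit run is zero-padded so that, for example, "2 Lexical analysis" sorts before "10 Types" under the ordinal, case-insensitive comparison:

using System;
using System.Linq;

static class CaptionCompareSketch
{
	static int CompareCaptions (string cap1, string cap2)
	{
		// Mirror of the patched logic: pad the shorter leading digit run with '0'
		// so numeric prefixes compare by value rather than lexicographically.
		if (cap1.Length > 0 && cap2.Length > 0 && char.IsDigit (cap1[0]) && char.IsDigit (cap2[0])) {
			int c1 = cap1.TakeWhile (char.IsDigit).Count ();
			int c2 = cap2.TakeWhile (char.IsDigit).Count ();

			if (c1 != c2) {
				cap1 = cap1.PadLeft (cap1.Length + Math.Max (0, c2 - c1), '0');
				cap2 = cap2.PadLeft (cap2.Length + Math.Max (0, c1 - c2), '0');
			}
		}
		return string.Compare (cap1, cap2, StringComparison.OrdinalIgnoreCase);
	}

	static void Main ()
	{
		Console.WriteLine (CompareCaptions ("2 Lexical analysis", "10 Types") < 0); // True: "02..." < "10..."
		Console.WriteLine (CompareCaptions ("9 Foo", "10 Bar") < 0);                // True: "09 Foo" < "10 Bar"
	}
}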